target (int64, 0-1) | func (string, lengths 7-484k) | func_no_comments (string, lengths 7-484k) | idx (int64, 1-368k) |
---|---|---|---|
0 | static int floppy_probe_device(const char *filename) { int fd, ret; int prio = 0; struct floppy_struct fdparam; struct stat st; if (strstart(filename, "/dev/fd", NULL)) prio = 50; fd = open(filename, O_RDONLY | O_NONBLOCK); if (fd < 0) { goto out; } ret = fstat(fd, &st); if (ret == -1 || !S_ISBLK(st.st_mode)) { goto outc; } /* Attempt to detect via a floppy specific ioctl */ ret = ioctl(fd, FDGETPRM, &fdparam); if (ret >= 0) prio = 100; outc: close(fd); out: return prio; } | static int floppy_probe_device(const char *filename) { int fd, ret; int prio = 0; struct floppy_struct fdparam; struct stat st; if (strstart(filename, "/dev/fd", NULL)) prio = 50; fd = open(filename, O_RDONLY | O_NONBLOCK); if (fd < 0) { goto out; } ret = fstat(fd, &st); if (ret == -1 || !S_ISBLK(st.st_mode)) { goto outc; } ret = ioctl(fd, FDGETPRM, &fdparam); if (ret >= 0) prio = 100; outc: close(fd); out: return prio; } | 1,282 |
0 | static lbmpdm_definition_t *lbmpdm_definition_find(guint64 channel, guint32 ID, guint8 version_major, guint8 version_minor) {
lbmpdm_definition_t *entry = NULL;
guint32 keyval[LBMPDM_DEFINITION_KEY_ELEMENT_COUNT];
wmem_tree_key_t tkey[2];
lbmpdm_definition_build_key(keyval, tkey, channel, ID, version_major, version_minor);
entry = (lbmpdm_definition_t *) wmem_tree_lookup32_array(lbmpdm_definition_table, tkey);
return (entry);
} | static lbmpdm_definition_t *lbmpdm_definition_find(guint64 channel, guint32 ID, guint8 version_major, guint8 version_minor) {
lbmpdm_definition_t *entry = NULL;
guint32 keyval[LBMPDM_DEFINITION_KEY_ELEMENT_COUNT];
wmem_tree_key_t tkey[2];
lbmpdm_definition_build_key(keyval, tkey, channel, ID, version_major, version_minor);
entry = (lbmpdm_definition_t *) wmem_tree_lookup32_array(lbmpdm_definition_table, tkey);
return (entry);
} | 1,283 |
0 | struct XenDevice *xen_be_find_xendev(const char *type, int dom, int dev) { struct XenDevice *xendev; TAILQ_FOREACH(xendev, &xendevs, next) { if (xendev->dom != dom) continue; if (xendev->dev != dev) continue; if (strcmp(xendev->type, type) != 0) continue; return xendev; } return NULL; } | struct XenDevice *xen_be_find_xendev(const char *type, int dom, int dev) { struct XenDevice *xendev; TAILQ_FOREACH(xendev, &xendevs, next) { if (xendev->dom != dom) continue; if (xendev->dev != dev) continue; if (strcmp(xendev->type, type) != 0) continue; return xendev; } return NULL; } | 1,285 |
1 | static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
kvm_queue_exception(vcpu, UD_VECTOR);
return EMULATE_FAIL;
} | static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
kvm_queue_exception(vcpu, UD_VECTOR);
return EMULATE_FAIL;
} | 1,287 |
0 | static int dissect_h245_INTEGER_96_127(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
offset = dissect_per_constrained_integer(tvb, offset, actx, tree, hf_index, 96U, 127U, NULL, FALSE);
return offset;
} | static int dissect_h245_INTEGER_96_127(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
offset = dissect_per_constrained_integer(tvb, offset, actx, tree, hf_index, 96U, 127U, NULL, FALSE);
return offset;
} | 1,288 |
1 | ecryptfs_write_metadata_to_contents(struct ecryptfs_crypt_stat *crypt_stat,
struct dentry *ecryptfs_dentry,
char *virt)
{
int rc;
rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt,
0, crypt_stat->num_header_bytes_at_front);
if (rc)
printk(KERN_ERR "%s: Error attempting to write header "
"information to lower file; rc = [%d]\n", __func__,
rc);
return rc;
} | ecryptfs_write_metadata_to_contents(struct ecryptfs_crypt_stat *crypt_stat,
struct dentry *ecryptfs_dentry,
char *virt)
{
int rc;
rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt,
0, crypt_stat->num_header_bytes_at_front);
if (rc)
printk(KERN_ERR "%s: Error attempting to write header "
"information to lower file; rc = [%d]\n", __func__,
rc);
return rc;
} | 1,289 |
0 | static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
int r = EMULATE_DONE;
++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu);
if (!is_guest_mode(vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
r = EMULATE_FAIL;
}
kvm_queue_exception(vcpu, UD_VECTOR);
return r;
} | static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
int r = EMULATE_DONE;
++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu);
if (!is_guest_mode(vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
r = EMULATE_FAIL;
}
kvm_queue_exception(vcpu, UD_VECTOR);
return r;
} | 1,290 |
1 | int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
{
struct vc_data *vc = vc_cons[fg_console].d;
int sel_mode, new_sel_start, new_sel_end, spc;
char *bp, *obp;
int i, ps, pe, multiplier;
u16 c;
struct kbd_struct *kbd = kbd_table + fg_console;
poke_blanked_console();
{ unsigned short xs, ys, xe, ye;
if (!access_ok(VERIFY_READ, sel, sizeof(*sel)))
return -EFAULT;
__get_user(xs, &sel->xs);
__get_user(ys, &sel->ys);
__get_user(xe, &sel->xe);
__get_user(ye, &sel->ye);
__get_user(sel_mode, &sel->sel_mode);
xs--; ys--; xe--; ye--;
xs = limit(xs, vc->vc_cols - 1);
ys = limit(ys, vc->vc_rows - 1);
xe = limit(xe, vc->vc_cols - 1);
ye = limit(ye, vc->vc_rows - 1);
ps = ys * vc->vc_size_row + (xs << 1);
pe = ye * vc->vc_size_row + (xe << 1);
if (sel_mode == TIOCL_SELCLEAR) {
/* useful for screendump without selection highlights */
clear_selection();
return 0;
}
if (mouse_reporting() && (sel_mode & TIOCL_SELMOUSEREPORT)) {
mouse_report(tty, sel_mode & TIOCL_SELBUTTONMASK, xs, ys);
return 0;
}
}
if (ps > pe) /* make sel_start <= sel_end */
{
int tmp = ps;
ps = pe;
pe = tmp;
}
if (sel_cons != vc_cons[fg_console].d) {
clear_selection();
sel_cons = vc_cons[fg_console].d;
}
use_unicode = kbd && kbd->kbdmode == VC_UNICODE;
switch (sel_mode)
{
case TIOCL_SELCHAR: /* character-by-character selection */
new_sel_start = ps;
new_sel_end = pe;
break;
case TIOCL_SELWORD: /* word-by-word selection */
spc = isspace(sel_pos(ps));
for (new_sel_start = ps; ; ps -= 2)
{
if ((spc && !isspace(sel_pos(ps))) ||
(!spc && !inword(sel_pos(ps))))
break;
new_sel_start = ps;
if (!(ps % vc->vc_size_row))
break;
}
spc = isspace(sel_pos(pe));
for (new_sel_end = pe; ; pe += 2)
{
if ((spc && !isspace(sel_pos(pe))) ||
(!spc && !inword(sel_pos(pe))))
break;
new_sel_end = pe;
if (!((pe + 2) % vc->vc_size_row))
break;
}
break;
case TIOCL_SELLINE: /* line-by-line selection */
new_sel_start = ps - ps % vc->vc_size_row;
new_sel_end = pe + vc->vc_size_row
- pe % vc->vc_size_row - 2;
break;
case TIOCL_SELPOINTER:
highlight_pointer(pe);
return 0;
default:
return -EINVAL;
}
/* remove the pointer */
highlight_pointer(-1);
/* select to end of line if on trailing space */
if (new_sel_end > new_sel_start &&
!atedge(new_sel_end, vc->vc_size_row) &&
isspace(sel_pos(new_sel_end))) {
for (pe = new_sel_end + 2; ; pe += 2)
if (!isspace(sel_pos(pe)) ||
atedge(pe, vc->vc_size_row))
break;
if (isspace(sel_pos(pe)))
new_sel_end = pe;
}
if (sel_start == -1) /* no current selection */
highlight(new_sel_start, new_sel_end);
else if (new_sel_start == sel_start)
{
if (new_sel_end == sel_end) /* no action required */
return 0;
else if (new_sel_end > sel_end) /* extend to right */
highlight(sel_end + 2, new_sel_end);
else /* contract from right */
highlight(new_sel_end + 2, sel_end);
}
else if (new_sel_end == sel_end)
{
if (new_sel_start < sel_start) /* extend to left */
highlight(new_sel_start, sel_start - 2);
else /* contract from left */
highlight(sel_start, new_sel_start - 2);
}
else /* some other case; start selection from scratch */
{
clear_selection();
highlight(new_sel_start, new_sel_end);
}
sel_start = new_sel_start;
sel_end = new_sel_end;
/* Allocate a new buffer before freeing the old one ... */
multiplier = use_unicode ? 3 : 1; /* chars can take up to 3 bytes */
bp = kmalloc((sel_end-sel_start)/2*multiplier+1, GFP_KERNEL);
if (!bp) {
printk(KERN_WARNING "selection: kmalloc() failed\n");
clear_selection();
return -ENOMEM;
}
kfree(sel_buffer);
sel_buffer = bp;
obp = bp;
for (i = sel_start; i <= sel_end; i += 2) {
c = sel_pos(i);
if (use_unicode)
bp += store_utf8(c, bp);
else
*bp++ = c;
if (!isspace(c))
obp = bp;
if (! ((i + 2) % vc->vc_size_row)) {
/* strip trailing blanks from line and add newline,
unless non-space at end of line. */
if (obp != bp) {
bp = obp;
*bp++ = '\r';
}
obp = bp;
}
}
sel_buffer_lth = bp - sel_buffer;
return 0;
} | int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
{
struct vc_data *vc = vc_cons[fg_console].d;
int sel_mode, new_sel_start, new_sel_end, spc;
char *bp, *obp;
int i, ps, pe, multiplier;
u16 c;
struct kbd_struct *kbd = kbd_table + fg_console;
poke_blanked_console();
{ unsigned short xs, ys, xe, ye;
if (!access_ok(VERIFY_READ, sel, sizeof(*sel)))
return -EFAULT;
__get_user(xs, &sel->xs);
__get_user(ys, &sel->ys);
__get_user(xe, &sel->xe);
__get_user(ye, &sel->ye);
__get_user(sel_mode, &sel->sel_mode);
xs--; ys--; xe--; ye--;
xs = limit(xs, vc->vc_cols - 1);
ys = limit(ys, vc->vc_rows - 1);
xe = limit(xe, vc->vc_cols - 1);
ye = limit(ye, vc->vc_rows - 1);
ps = ys * vc->vc_size_row + (xs << 1);
pe = ye * vc->vc_size_row + (xe << 1);
if (sel_mode == TIOCL_SELCLEAR) {
clear_selection();
return 0;
}
if (mouse_reporting() && (sel_mode & TIOCL_SELMOUSEREPORT)) {
mouse_report(tty, sel_mode & TIOCL_SELBUTTONMASK, xs, ys);
return 0;
}
}
if (ps > pe)
{
int tmp = ps;
ps = pe;
pe = tmp;
}
if (sel_cons != vc_cons[fg_console].d) {
clear_selection();
sel_cons = vc_cons[fg_console].d;
}
use_unicode = kbd && kbd->kbdmode == VC_UNICODE;
switch (sel_mode)
{
case TIOCL_SELCHAR:
new_sel_start = ps;
new_sel_end = pe;
break;
case TIOCL_SELWORD:
spc = isspace(sel_pos(ps));
for (new_sel_start = ps; ; ps -= 2)
{
if ((spc && !isspace(sel_pos(ps))) ||
(!spc && !inword(sel_pos(ps))))
break;
new_sel_start = ps;
if (!(ps % vc->vc_size_row))
break;
}
spc = isspace(sel_pos(pe));
for (new_sel_end = pe; ; pe += 2)
{
if ((spc && !isspace(sel_pos(pe))) ||
(!spc && !inword(sel_pos(pe))))
break;
new_sel_end = pe;
if (!((pe + 2) % vc->vc_size_row))
break;
}
break;
case TIOCL_SELLINE:
new_sel_start = ps - ps % vc->vc_size_row;
new_sel_end = pe + vc->vc_size_row
- pe % vc->vc_size_row - 2;
break;
case TIOCL_SELPOINTER:
highlight_pointer(pe);
return 0;
default:
return -EINVAL;
}
highlight_pointer(-1);
if (new_sel_end > new_sel_start &&
!atedge(new_sel_end, vc->vc_size_row) &&
isspace(sel_pos(new_sel_end))) {
for (pe = new_sel_end + 2; ; pe += 2)
if (!isspace(sel_pos(pe)) ||
atedge(pe, vc->vc_size_row))
break;
if (isspace(sel_pos(pe)))
new_sel_end = pe;
}
if (sel_start == -1)
highlight(new_sel_start, new_sel_end);
else if (new_sel_start == sel_start)
{
if (new_sel_end == sel_end)
return 0;
else if (new_sel_end > sel_end)
highlight(sel_end + 2, new_sel_end);
else
highlight(new_sel_end + 2, sel_end);
}
else if (new_sel_end == sel_end)
{
if (new_sel_start < sel_start)
highlight(new_sel_start, sel_start - 2);
else
highlight(sel_start, new_sel_start - 2);
}
else
{
clear_selection();
highlight(new_sel_start, new_sel_end);
}
sel_start = new_sel_start;
sel_end = new_sel_end;
multiplier = use_unicode ? 3 : 1;
bp = kmalloc((sel_end-sel_start)/2*multiplier+1, GFP_KERNEL);
if (!bp) {
printk(KERN_WARNING "selection: kmalloc() failed\n");
clear_selection();
return -ENOMEM;
}
kfree(sel_buffer);
sel_buffer = bp;
obp = bp;
for (i = sel_start; i <= sel_end; i += 2) {
c = sel_pos(i);
if (use_unicode)
bp += store_utf8(c, bp);
else
*bp++ = c;
if (!isspace(c))
obp = bp;
if (! ((i + 2) % vc->vc_size_row)) {
if (obp != bp) {
bp = obp;
*bp++ = '\r';
}
obp = bp;
}
}
sel_buffer_lth = bp - sel_buffer;
return 0;
} | 1,291 |
1 | void process_packet_tail(struct msg_digest *md)
{
struct state *st = md->st;
enum state_kind from_state = md->v1_from_state;
const struct state_v1_microcode *smc = md->smc;
bool new_iv_set = md->new_iv_set;
bool self_delete = FALSE;
if (md->hdr.isa_flags & ISAKMP_FLAGS_v1_ENCRYPTION) {
endpoint_buf b;
dbg("received encrypted packet from %s", str_endpoint(&md->sender, &b));
if (st == NULL) {
libreswan_log(
"discarding encrypted message for an unknown ISAKMP SA");
return;
}
if (st->st_skeyid_e_nss == NULL) {
loglog(RC_LOG_SERIOUS,
"discarding encrypted message because we haven't yet negotiated keying material");
return;
}
/* Mark as encrypted */
md->encrypted = TRUE;
/* do the specified decryption
*
* IV is from st->st_iv or (if new_iv_set) st->st_new_iv.
* The new IV is placed in st->st_new_iv
*
* See RFC 2409 "IKE" Appendix B
*
* XXX The IV should only be updated really if the packet
* is successfully processed.
* We should keep this value, check for a success return
* value from the parsing routines and then replace.
*
* Each post phase 1 exchange generates IVs from
* the last phase 1 block, not the last block sent.
*/
const struct encrypt_desc *e = st->st_oakley.ta_encrypt;
if (pbs_left(&md->message_pbs) % e->enc_blocksize != 0) {
loglog(RC_LOG_SERIOUS, "malformed message: not a multiple of encryption blocksize");
return;
}
/* XXX Detect weak keys */
/* grab a copy of raw packet (for duplicate packet detection) */
md->raw_packet = clone_bytes_as_chunk(md->packet_pbs.start,
pbs_room(&md->packet_pbs),
"raw packet");
/* Decrypt everything after header */
if (!new_iv_set) {
if (st->st_v1_iv.len == 0) {
init_phase2_iv(st, &md->hdr.isa_msgid);
} else {
/* use old IV */
restore_new_iv(st, st->st_v1_iv);
}
}
passert(st->st_v1_new_iv.len >= e->enc_blocksize);
st->st_v1_new_iv.len = e->enc_blocksize; /* truncate */
if (DBGP(DBG_CRYPT)) {
DBG_log("decrypting %u bytes using algorithm %s",
(unsigned) pbs_left(&md->message_pbs),
st->st_oakley.ta_encrypt->common.fqn);
DBG_dump_hunk("IV before:", st->st_v1_new_iv);
}
e->encrypt_ops->do_crypt(e, md->message_pbs.cur,
pbs_left(&md->message_pbs),
st->st_enc_key_nss,
st->st_v1_new_iv.ptr, FALSE);
if (DBGP(DBG_CRYPT)) {
DBG_dump_hunk("IV after:", st->st_v1_new_iv);
DBG_log("decrypted payload (starts at offset %td):",
md->message_pbs.cur - md->message_pbs.roof);
DBG_dump(NULL, md->message_pbs.start,
md->message_pbs.roof - md->message_pbs.start);
}
} else {
/* packet was not encrypted -- should it have been? */
if (smc->flags & SMF_INPUT_ENCRYPTED) {
loglog(RC_LOG_SERIOUS,
"packet rejected: should have been encrypted");
SEND_NOTIFICATION(INVALID_FLAGS);
return;
}
}
/* Digest the message.
* Padding must be removed to make hashing work.
* Padding comes from encryption (so this code must be after decryption).
* Padding rules are described before the definition of
* struct isakmp_hdr in packet.h.
*/
{
enum next_payload_types_ikev1 np = md->hdr.isa_np;
lset_t needed = smc->req_payloads;
const char *excuse =
LIN(SMF_PSK_AUTH | SMF_FIRST_ENCRYPTED_INPUT,
smc->flags) ?
"probable authentication failure (mismatch of preshared secrets?): "
:
"";
while (np != ISAKMP_NEXT_NONE) {
struct_desc *sd = v1_payload_desc(np);
if (md->digest_roof >= elemsof(md->digest)) {
loglog(RC_LOG_SERIOUS,
"more than %zu payloads in message; ignored",
elemsof(md->digest));
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
struct payload_digest *const pd = md->digest + md->digest_roof;
/*
* only do this in main mode. In aggressive mode, there
* is no negotiation of NAT-T method. Get it right.
*/
if (st != NULL && st->st_connection != NULL &&
(st->st_connection->policy & POLICY_AGGRESSIVE) == LEMPTY)
{
switch (np) {
case ISAKMP_NEXT_NATD_RFC:
case ISAKMP_NEXT_NATOA_RFC:
if ((st->hidden_variables.st_nat_traversal & NAT_T_WITH_RFC_VALUES) == LEMPTY) {
/*
* don't accept NAT-D/NAT-OA reloc directly in message,
* unless we're using NAT-T RFC
*/
DBG(DBG_NATT,
DBG_log("st_nat_traversal was: %s",
bitnamesof(natt_bit_names,
st->hidden_variables.st_nat_traversal)));
sd = NULL;
}
break;
default:
break;
}
}
if (sd == NULL) {
/* payload type is out of range or requires special handling */
switch (np) {
case ISAKMP_NEXT_ID:
/* ??? two kinds of ID payloads */
sd = (IS_PHASE1(from_state) ||
IS_PHASE15(from_state)) ?
&isakmp_identification_desc :
&isakmp_ipsec_identification_desc;
break;
case ISAKMP_NEXT_NATD_DRAFTS: /* out of range */
/*
* ISAKMP_NEXT_NATD_DRAFTS was a private use type before RFC-3947.
* Since it has the same format as ISAKMP_NEXT_NATD_RFC,
* just rewrite np and sd, and carry on.
*/
np = ISAKMP_NEXT_NATD_RFC;
sd = &isakmp_nat_d_drafts;
break;
case ISAKMP_NEXT_NATOA_DRAFTS: /* out of range */
/* NAT-OA was a private use type before RFC-3947 -- same format */
np = ISAKMP_NEXT_NATOA_RFC;
sd = &isakmp_nat_oa_drafts;
break;
case ISAKMP_NEXT_SAK: /* or ISAKMP_NEXT_NATD_BADDRAFTS */
/*
* Official standards say that this is ISAKMP_NEXT_SAK,
* a part of Group DOI, something we don't implement.
* Old non-updated Cisco gear abused this number in ancient NAT drafts.
* We ignore (rather than reject) this in support of people
* with crufty Cisco machines.
*/
loglog(RC_LOG_SERIOUS,
"%smessage with unsupported payload ISAKMP_NEXT_SAK (or ISAKMP_NEXT_NATD_BADDRAFTS) ignored",
excuse);
/*
* Hack to discard payload, whatever it was.
* Since we are skipping the rest of the loop
* body we must do some things ourself:
* - demarshall the payload
* - grab the next payload number (np)
* - don't keep payload (don't increment pd)
* - skip rest of loop body
*/
if (!in_struct(&pd->payload, &isakmp_ignore_desc, &md->message_pbs,
&pd->pbs)) {
loglog(RC_LOG_SERIOUS,
"%smalformed payload in packet",
excuse);
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
np = pd->payload.generic.isag_np;
/* NOTE: we do not increment pd! */
continue; /* skip rest of the loop */
default:
loglog(RC_LOG_SERIOUS,
"%smessage ignored because it contains an unknown or unexpected payload type (%s) at the outermost level",
excuse,
enum_show(&ikev1_payload_names, np));
if (!md->encrypted) {
SEND_NOTIFICATION(INVALID_PAYLOAD_TYPE);
}
return;
}
passert(sd != NULL);
}
passert(np < LELEM_ROOF);
{
lset_t s = LELEM(np);
if (LDISJOINT(s,
needed | smc->opt_payloads |
LELEM(ISAKMP_NEXT_VID) |
LELEM(ISAKMP_NEXT_N) |
LELEM(ISAKMP_NEXT_D) |
LELEM(ISAKMP_NEXT_CR) |
LELEM(ISAKMP_NEXT_CERT))) {
loglog(RC_LOG_SERIOUS,
"%smessage ignored because it contains a payload type (%s) unexpected by state %s",
excuse,
enum_show(&ikev1_payload_names, np),
st->st_state->name);
if (!md->encrypted) {
SEND_NOTIFICATION(INVALID_PAYLOAD_TYPE);
}
return;
}
DBG(DBG_PARSING,
DBG_log("got payload 0x%" PRIxLSET" (%s) needed: 0x%" PRIxLSET " opt: 0x%" PRIxLSET,
s, enum_show(&ikev1_payload_names, np),
needed, smc->opt_payloads));
needed &= ~s;
}
/*
* Read in the payload recording what type it
* should be
*/
pd->payload_type = np;
if (!in_struct(&pd->payload, sd, &md->message_pbs,
&pd->pbs)) {
loglog(RC_LOG_SERIOUS,
"%smalformed payload in packet",
excuse);
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
/* do payload-type specific debugging */
switch (np) {
case ISAKMP_NEXT_ID:
case ISAKMP_NEXT_NATOA_RFC:
/* dump ID section */
DBG(DBG_PARSING,
DBG_dump(" obj: ", pd->pbs.cur,
pbs_left(&pd->pbs)));
break;
default:
break;
}
/*
* Place payload at the end of the chain for this type.
* This code appears in ikev1.c and ikev2.c.
*/
{
/* np is a proper subscript for chain[] */
passert(np < elemsof(md->chain));
struct payload_digest **p = &md->chain[np];
while (*p != NULL)
p = &(*p)->next;
*p = pd;
pd->next = NULL;
}
np = pd->payload.generic.isag_np;
md->digest_roof++;
/* since we've digested one payload happily, it is probably
* the case that any decryption worked. So we will not suggest
* encryption failure as an excuse for subsequent payload
* problems.
*/
excuse = "";
}
DBG(DBG_PARSING, {
if (pbs_left(&md->message_pbs) != 0)
DBG_log("removing %d bytes of padding",
(int) pbs_left(&md->message_pbs));
});
md->message_pbs.roof = md->message_pbs.cur;
/* check that all mandatory payloads appeared */
if (needed != 0) {
loglog(RC_LOG_SERIOUS,
"message for %s is missing payloads %s",
finite_states[from_state]->name,
bitnamesof(payload_name_ikev1, needed));
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
}
if (!check_v1_HASH(smc->hash_type, smc->message, st, md)) {
/*SEND_NOTIFICATION(INVALID_HASH_INFORMATION);*/
return;
}
/* more sanity checking: enforce most ordering constraints */
if (IS_PHASE1(from_state) || IS_PHASE15(from_state)) {
/* rfc2409: The Internet Key Exchange (IKE), 5 Exchanges:
* "The SA payload MUST precede all other payloads in a phase 1 exchange."
*/
if (md->chain[ISAKMP_NEXT_SA] != NULL &&
md->hdr.isa_np != ISAKMP_NEXT_SA) {
loglog(RC_LOG_SERIOUS,
"malformed Phase 1 message: does not start with an SA payload");
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
} else if (IS_QUICK(from_state)) {
/* rfc2409: The Internet Key Exchange (IKE), 5.5 Phase 2 - Quick Mode
*
* "In Quick Mode, a HASH payload MUST immediately follow the ISAKMP
* header and a SA payload MUST immediately follow the HASH."
* [NOTE: there may be more than one SA payload, so this is not
* totally reasonable. Probably all SAs should be so constrained.]
*
* "If ISAKMP is acting as a client negotiator on behalf of another
* party, the identities of the parties MUST be passed as IDci and
* then IDcr."
*
* "With the exception of the HASH, SA, and the optional ID payloads,
* there are no payload ordering restrictions on Quick Mode."
*/
if (md->hdr.isa_np != ISAKMP_NEXT_HASH) {
loglog(RC_LOG_SERIOUS,
"malformed Quick Mode message: does not start with a HASH payload");
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
{
struct payload_digest *p;
int i;
p = md->chain[ISAKMP_NEXT_SA];
i = 1;
while (p != NULL) {
if (p != &md->digest[i]) {
loglog(RC_LOG_SERIOUS,
"malformed Quick Mode message: SA payload is in wrong position");
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
p = p->next;
i++;
}
}
/* rfc2409: The Internet Key Exchange (IKE), 5.5 Phase 2 - Quick Mode:
* "If ISAKMP is acting as a client negotiator on behalf of another
* party, the identities of the parties MUST be passed as IDci and
* then IDcr."
*/
{
struct payload_digest *id = md->chain[ISAKMP_NEXT_ID];
if (id != NULL) {
if (id->next == NULL ||
id->next->next != NULL) {
loglog(RC_LOG_SERIOUS,
"malformed Quick Mode message: if any ID payload is present, there must be exactly two");
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
if (id + 1 != id->next) {
loglog(RC_LOG_SERIOUS,
"malformed Quick Mode message: the ID payloads are not adjacent");
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
}
}
}
/*
* Ignore payloads that we don't handle:
*/
/* XXX Handle Notifications */
{
struct payload_digest *p = md->chain[ISAKMP_NEXT_N];
while (p != NULL) {
switch (p->payload.notification.isan_type) {
case R_U_THERE:
case R_U_THERE_ACK:
case ISAKMP_N_CISCO_LOAD_BALANCE:
case PAYLOAD_MALFORMED:
case INVALID_MESSAGE_ID:
case IPSEC_RESPONDER_LIFETIME:
if (md->hdr.isa_xchg == ISAKMP_XCHG_INFO) {
/* these are handled later on in informational() */
break;
}
/* FALL THROUGH */
default:
if (st == NULL) {
DBG(DBG_CONTROL, DBG_log(
"ignoring informational payload %s, no corresponding state",
enum_show(& ikev1_notify_names,
p->payload.notification.isan_type)));
} else {
loglog(RC_LOG_SERIOUS,
"ignoring informational payload %s, msgid=%08" PRIx32 ", length=%d",
enum_show(&ikev1_notify_names,
p->payload.notification.isan_type),
st->st_v1_msgid.id,
p->payload.notification.isan_length);
DBG_dump_pbs(&p->pbs);
}
}
if (DBGP(DBG_BASE)) {
DBG_dump("info:", p->pbs.cur,
pbs_left(&p->pbs));
}
p = p->next;
}
p = md->chain[ISAKMP_NEXT_D];
while (p != NULL) {
self_delete |= accept_delete(md, p);
if (DBGP(DBG_BASE)) {
DBG_dump("del:", p->pbs.cur,
pbs_left(&p->pbs));
}
if (md->st != st) {
pexpect(md->st == NULL);
dbg("zapping ST as accept_delete() zapped MD.ST");
st = md->st;
}
p = p->next;
}
p = md->chain[ISAKMP_NEXT_VID];
while (p != NULL) {
handle_vendorid(md, (char *)p->pbs.cur,
pbs_left(&p->pbs), FALSE);
p = p->next;
}
}
if (self_delete) {
accept_self_delete(md);
st = md->st;
/* note: st ought to be NULL from here on */
}
pexpect(st == md->st);
statetime_t start = statetime_start(md->st);
/*
* XXX: danger - the .informational() processor deletes ST;
* and then tunnels this loss through MD.ST.
*/
complete_v1_state_transition(md, smc->processor(st, md));
statetime_stop(&start, "%s()", __func__);
/* our caller will release_any_md(mdp); */
} | void process_packet_tail(struct msg_digest *md)
{
struct state *st = md->st;
enum state_kind from_state = md->v1_from_state;
const struct state_v1_microcode *smc = md->smc;
bool new_iv_set = md->new_iv_set;
bool self_delete = FALSE;
if (md->hdr.isa_flags & ISAKMP_FLAGS_v1_ENCRYPTION) {
endpoint_buf b;
dbg("received encrypted packet from %s", str_endpoint(&md->sender, &b));
if (st == NULL) {
libreswan_log(
"discarding encrypted message for an unknown ISAKMP SA");
return;
}
if (st->st_skeyid_e_nss == NULL) {
loglog(RC_LOG_SERIOUS,
"discarding encrypted message because we haven't yet negotiated keying material");
return;
}
md->encrypted = TRUE;
const struct encrypt_desc *e = st->st_oakley.ta_encrypt;
if (pbs_left(&md->message_pbs) % e->enc_blocksize != 0) {
loglog(RC_LOG_SERIOUS, "malformed message: not a multiple of encryption blocksize");
return;
}
md->raw_packet = clone_bytes_as_chunk(md->packet_pbs.start,
pbs_room(&md->packet_pbs),
"raw packet");
if (!new_iv_set) {
if (st->st_v1_iv.len == 0) {
init_phase2_iv(st, &md->hdr.isa_msgid);
} else {
restore_new_iv(st, st->st_v1_iv);
}
}
passert(st->st_v1_new_iv.len >= e->enc_blocksize);
st->st_v1_new_iv.len = e->enc_blocksize;
if (DBGP(DBG_CRYPT)) {
DBG_log("decrypting %u bytes using algorithm %s",
(unsigned) pbs_left(&md->message_pbs),
st->st_oakley.ta_encrypt->common.fqn);
DBG_dump_hunk("IV before:", st->st_v1_new_iv);
}
e->encrypt_ops->do_crypt(e, md->message_pbs.cur,
pbs_left(&md->message_pbs),
st->st_enc_key_nss,
st->st_v1_new_iv.ptr, FALSE);
if (DBGP(DBG_CRYPT)) {
DBG_dump_hunk("IV after:", st->st_v1_new_iv);
DBG_log("decrypted payload (starts at offset %td):",
md->message_pbs.cur - md->message_pbs.roof);
DBG_dump(NULL, md->message_pbs.start,
md->message_pbs.roof - md->message_pbs.start);
}
} else {
if (smc->flags & SMF_INPUT_ENCRYPTED) {
loglog(RC_LOG_SERIOUS,
"packet rejected: should have been encrypted");
SEND_NOTIFICATION(INVALID_FLAGS);
return;
}
}
{
enum next_payload_types_ikev1 np = md->hdr.isa_np;
lset_t needed = smc->req_payloads;
const char *excuse =
LIN(SMF_PSK_AUTH | SMF_FIRST_ENCRYPTED_INPUT,
smc->flags) ?
"probable authentication failure (mismatch of preshared secrets?): "
:
"";
while (np != ISAKMP_NEXT_NONE) {
struct_desc *sd = v1_payload_desc(np);
if (md->digest_roof >= elemsof(md->digest)) {
loglog(RC_LOG_SERIOUS,
"more than %zu payloads in message; ignored",
elemsof(md->digest));
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
struct payload_digest *const pd = md->digest + md->digest_roof;
if (st != NULL && st->st_connection != NULL &&
(st->st_connection->policy & POLICY_AGGRESSIVE) == LEMPTY)
{
switch (np) {
case ISAKMP_NEXT_NATD_RFC:
case ISAKMP_NEXT_NATOA_RFC:
if ((st->hidden_variables.st_nat_traversal & NAT_T_WITH_RFC_VALUES) == LEMPTY) {
DBG(DBG_NATT,
DBG_log("st_nat_traversal was: %s",
bitnamesof(natt_bit_names,
st->hidden_variables.st_nat_traversal)));
sd = NULL;
}
break;
default:
break;
}
}
if (sd == NULL) {
switch (np) {
case ISAKMP_NEXT_ID:
sd = (IS_PHASE1(from_state) ||
IS_PHASE15(from_state)) ?
&isakmp_identification_desc :
&isakmp_ipsec_identification_desc;
break;
case ISAKMP_NEXT_NATD_DRAFTS:
np = ISAKMP_NEXT_NATD_RFC;
sd = &isakmp_nat_d_drafts;
break;
case ISAKMP_NEXT_NATOA_DRAFTS:
np = ISAKMP_NEXT_NATOA_RFC;
sd = &isakmp_nat_oa_drafts;
break;
case ISAKMP_NEXT_SAK:
loglog(RC_LOG_SERIOUS,
"%smessage with unsupported payload ISAKMP_NEXT_SAK (or ISAKMP_NEXT_NATD_BADDRAFTS) ignored",
excuse);
if (!in_struct(&pd->payload, &isakmp_ignore_desc, &md->message_pbs,
&pd->pbs)) {
loglog(RC_LOG_SERIOUS,
"%smalformed payload in packet",
excuse);
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
np = pd->payload.generic.isag_np;
continue;
default:
loglog(RC_LOG_SERIOUS,
"%smessage ignored because it contains an unknown or unexpected payload type (%s) at the outermost level",
excuse,
enum_show(&ikev1_payload_names, np));
if (!md->encrypted) {
SEND_NOTIFICATION(INVALID_PAYLOAD_TYPE);
}
return;
}
passert(sd != NULL);
}
passert(np < LELEM_ROOF);
{
lset_t s = LELEM(np);
if (LDISJOINT(s,
needed | smc->opt_payloads |
LELEM(ISAKMP_NEXT_VID) |
LELEM(ISAKMP_NEXT_N) |
LELEM(ISAKMP_NEXT_D) |
LELEM(ISAKMP_NEXT_CR) |
LELEM(ISAKMP_NEXT_CERT))) {
loglog(RC_LOG_SERIOUS,
"%smessage ignored because it contains a payload type (%s) unexpected by state %s",
excuse,
enum_show(&ikev1_payload_names, np),
st->st_state->name);
if (!md->encrypted) {
SEND_NOTIFICATION(INVALID_PAYLOAD_TYPE);
}
return;
}
DBG(DBG_PARSING,
DBG_log("got payload 0x%" PRIxLSET" (%s) needed: 0x%" PRIxLSET " opt: 0x%" PRIxLSET,
s, enum_show(&ikev1_payload_names, np),
needed, smc->opt_payloads));
needed &= ~s;
}
pd->payload_type = np;
if (!in_struct(&pd->payload, sd, &md->message_pbs,
&pd->pbs)) {
loglog(RC_LOG_SERIOUS,
"%smalformed payload in packet",
excuse);
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
switch (np) {
case ISAKMP_NEXT_ID:
case ISAKMP_NEXT_NATOA_RFC:
DBG(DBG_PARSING,
DBG_dump(" obj: ", pd->pbs.cur,
pbs_left(&pd->pbs)));
break;
default:
break;
}
{
passert(np < elemsof(md->chain));
struct payload_digest **p = &md->chain[np];
while (*p != NULL)
p = &(*p)->next;
*p = pd;
pd->next = NULL;
}
np = pd->payload.generic.isag_np;
md->digest_roof++;
excuse = "";
}
DBG(DBG_PARSING, {
if (pbs_left(&md->message_pbs) != 0)
DBG_log("removing %d bytes of padding",
(int) pbs_left(&md->message_pbs));
});
md->message_pbs.roof = md->message_pbs.cur;
if (needed != 0) {
loglog(RC_LOG_SERIOUS,
"message for %s is missing payloads %s",
finite_states[from_state]->name,
bitnamesof(payload_name_ikev1, needed));
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
}
if (!check_v1_HASH(smc->hash_type, smc->message, st, md)) {
return;
}
if (IS_PHASE1(from_state) || IS_PHASE15(from_state)) {
if (md->chain[ISAKMP_NEXT_SA] != NULL &&
md->hdr.isa_np != ISAKMP_NEXT_SA) {
loglog(RC_LOG_SERIOUS,
"malformed Phase 1 message: does not start with an SA payload");
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
} else if (IS_QUICK(from_state)) {
if (md->hdr.isa_np != ISAKMP_NEXT_HASH) {
loglog(RC_LOG_SERIOUS,
"malformed Quick Mode message: does not start with a HASH payload");
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
{
struct payload_digest *p;
int i;
p = md->chain[ISAKMP_NEXT_SA];
i = 1;
while (p != NULL) {
if (p != &md->digest[i]) {
loglog(RC_LOG_SERIOUS,
"malformed Quick Mode message: SA payload is in wrong position");
if (!md->encrypted) {
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
}
return;
}
p = p->next;
i++;
}
}
{
struct payload_digest *id = md->chain[ISAKMP_NEXT_ID];
if (id != NULL) {
if (id->next == NULL ||
id->next->next != NULL) {
loglog(RC_LOG_SERIOUS,
"malformed Quick Mode message: if any ID payload is present, there must be exactly two");
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
if (id + 1 != id->next) {
loglog(RC_LOG_SERIOUS,
"malformed Quick Mode message: the ID payloads are not adjacent");
SEND_NOTIFICATION(PAYLOAD_MALFORMED);
return;
}
}
}
}
{
struct payload_digest *p = md->chain[ISAKMP_NEXT_N];
while (p != NULL) {
switch (p->payload.notification.isan_type) {
case R_U_THERE:
case R_U_THERE_ACK:
case ISAKMP_N_CISCO_LOAD_BALANCE:
case PAYLOAD_MALFORMED:
case INVALID_MESSAGE_ID:
case IPSEC_RESPONDER_LIFETIME:
if (md->hdr.isa_xchg == ISAKMP_XCHG_INFO) {
break;
}
default:
if (st == NULL) {
DBG(DBG_CONTROL, DBG_log(
"ignoring informational payload %s, no corresponding state",
enum_show(& ikev1_notify_names,
p->payload.notification.isan_type)));
} else {
loglog(RC_LOG_SERIOUS,
"ignoring informational payload %s, msgid=%08" PRIx32 ", length=%d",
enum_show(&ikev1_notify_names,
p->payload.notification.isan_type),
st->st_v1_msgid.id,
p->payload.notification.isan_length);
DBG_dump_pbs(&p->pbs);
}
}
if (DBGP(DBG_BASE)) {
DBG_dump("info:", p->pbs.cur,
pbs_left(&p->pbs));
}
p = p->next;
}
p = md->chain[ISAKMP_NEXT_D];
while (p != NULL) {
self_delete |= accept_delete(md, p);
if (DBGP(DBG_BASE)) {
DBG_dump("del:", p->pbs.cur,
pbs_left(&p->pbs));
}
if (md->st != st) {
pexpect(md->st == NULL);
dbg("zapping ST as accept_delete() zapped MD.ST");
st = md->st;
}
p = p->next;
}
p = md->chain[ISAKMP_NEXT_VID];
while (p != NULL) {
handle_vendorid(md, (char *)p->pbs.cur,
pbs_left(&p->pbs), FALSE);
p = p->next;
}
}
if (self_delete) {
accept_self_delete(md);
st = md->st;
}
pexpect(st == md->st);
statetime_t start = statetime_start(md->st);
complete_v1_state_transition(md, smc->processor(st, md));
statetime_stop(&start, "%s()", __func__);
} | 1,294 |
0 | void Messageheader::Parser::checkHeaderspace(unsigned chars) const
{
if (headerdataPtr + chars >= header.rawdata + sizeof(header.rawdata))
{
header.rawdata[sizeof(header.rawdata) - 1] = '\0';
throw HttpError(HTTP_REQUEST_ENTITY_TOO_LARGE, "header too large");
}
} | void Messageheader::Parser::checkHeaderspace(unsigned chars) const
{
if (headerdataPtr + chars >= header.rawdata + sizeof(header.rawdata))
{
header.rawdata[sizeof(header.rawdata) - 1] = '\0';
throw HttpError(HTTP_REQUEST_ENTITY_TOO_LARGE, "header too large");
}
} | 1,297 |
0 | static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info, VqInfoBlockLegacy *linfo) { VirtIODevice *vdev = virtio_ccw_get_vdev(sch); uint16_t index = info ? info->index : linfo->index; uint16_t num = info ? info->num : linfo->num; uint64_t desc = info ? info->desc : linfo->queue; if (index >= VIRTIO_CCW_QUEUE_MAX) { return -EINVAL; } /* Current code in virtio.c relies on 4K alignment. */ if (linfo && desc && (linfo->align != 4096)) { return -EINVAL; } if (!vdev) { return -EINVAL; } if (info) { virtio_queue_set_rings(vdev, index, desc, info->avail, info->used); } else { virtio_queue_set_addr(vdev, index, desc); } if (!desc) { virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR); } else { if (info) { /* virtio-1 allows changing the ring size. */ if (virtio_queue_get_max_num(vdev, index) < num) { /* Fail if we exceed the maximum number. */ return -EINVAL; } virtio_queue_set_num(vdev, index, num); } else if (virtio_queue_get_num(vdev, index) > num) { /* Fail if we don't have a big enough queue. */ return -EINVAL; } /* We ignore possible increased num for legacy for compatibility. */ virtio_queue_set_vector(vdev, index, index); } /* tell notify handler in case of config change */ vdev->config_vector = VIRTIO_CCW_QUEUE_MAX; return 0; } | static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info, VqInfoBlockLegacy *linfo) { VirtIODevice *vdev = virtio_ccw_get_vdev(sch); uint16_t index = info ? info->index : linfo->index; uint16_t num = info ? info->num : linfo->num; uint64_t desc = info ? info->desc : linfo->queue; if (index >= VIRTIO_CCW_QUEUE_MAX) { return -EINVAL; } if (linfo && desc && (linfo->align != 4096)) { return -EINVAL; } if (!vdev) { return -EINVAL; } if (info) { virtio_queue_set_rings(vdev, index, desc, info->avail, info->used); } else { virtio_queue_set_addr(vdev, index, desc); } if (!desc) { virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR); } else { if (info) { if (virtio_queue_get_max_num(vdev, index) < num) { return -EINVAL; } virtio_queue_set_num(vdev, index, num); } else if (virtio_queue_get_num(vdev, index) > num) { return -EINVAL; } virtio_queue_set_vector(vdev, index, index); } vdev->config_vector = VIRTIO_CCW_QUEUE_MAX; return 0; } | 1,299 |
1 | static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
{
struct dentry *parent = desc->file->f_path.dentry;
struct inode *dir = parent->d_inode;
struct nfs_entry *entry = desc->entry;
struct dentry *dentry, *alias;
struct qstr name = {
.name = entry->name,
.len = entry->len,
};
struct inode *inode;
switch (name.len) {
case 2:
if (name.name[0] == '.' && name.name[1] == '.')
return dget_parent(parent);
break;
case 1:
if (name.name[0] == '.')
return dget(parent);
}
name.hash = full_name_hash(name.name, name.len);
dentry = d_lookup(parent, &name);
if (dentry != NULL) {
/* Is this a positive dentry that matches the readdir info? */
if (dentry->d_inode != NULL &&
(NFS_FILEID(dentry->d_inode) == entry->ino ||
d_mountpoint(dentry))) {
if (!desc->plus || entry->fh->size == 0)
return dentry;
if (nfs_compare_fh(NFS_FH(dentry->d_inode),
entry->fh) == 0)
goto out_renew;
}
/* No, so d_drop to allow one to be created */
d_drop(dentry);
dput(dentry);
}
if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
return NULL;
/* Note: caller is already holding the dir->i_mutex! */
dentry = d_alloc(parent, &name);
if (dentry == NULL)
return NULL;
dentry->d_op = NFS_PROTO(dir)->dentry_ops;
inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr);
if (IS_ERR(inode)) {
dput(dentry);
return NULL;
}
alias = d_materialise_unique(dentry, inode);
if (alias != NULL) {
dput(dentry);
if (IS_ERR(alias))
return NULL;
dentry = alias;
}
nfs_renew_times(dentry);
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
return dentry;
out_renew:
nfs_renew_times(dentry);
nfs_refresh_verifier(dentry, nfs_save_change_attribute(dir));
return dentry;
} | static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
{
struct dentry *parent = desc->file->f_path.dentry;
struct inode *dir = parent->d_inode;
struct nfs_entry *entry = desc->entry;
struct dentry *dentry, *alias;
struct qstr name = {
.name = entry->name,
.len = entry->len,
};
struct inode *inode;
switch (name.len) {
case 2:
if (name.name[0] == '.' && name.name[1] == '.')
return dget_parent(parent);
break;
case 1:
if (name.name[0] == '.')
return dget(parent);
}
name.hash = full_name_hash(name.name, name.len);
dentry = d_lookup(parent, &name);
if (dentry != NULL) {
if (dentry->d_inode != NULL &&
(NFS_FILEID(dentry->d_inode) == entry->ino ||
d_mountpoint(dentry))) {
if (!desc->plus || entry->fh->size == 0)
return dentry;
if (nfs_compare_fh(NFS_FH(dentry->d_inode),
entry->fh) == 0)
goto out_renew;
}
d_drop(dentry);
dput(dentry);
}
if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
return NULL;
dentry = d_alloc(parent, &name);
if (dentry == NULL)
return NULL;
dentry->d_op = NFS_PROTO(dir)->dentry_ops;
inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr);
if (IS_ERR(inode)) {
dput(dentry);
return NULL;
}
alias = d_materialise_unique(dentry, inode);
if (alias != NULL) {
dput(dentry);
if (IS_ERR(alias))
return NULL;
dentry = alias;
}
nfs_renew_times(dentry);
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
return dentry;
out_renew:
nfs_renew_times(dentry);
nfs_refresh_verifier(dentry, nfs_save_change_attribute(dir));
return dentry;
} | 1,300 |
0 | void qemu_system_debug_request(void) { debug_requested = 1; vm_stop(VMSTOP_DEBUG); } | void qemu_system_debug_request(void) { debug_requested = 1; vm_stop(VMSTOP_DEBUG); } | 1,301 |
0 | bool Messageheader::Parser::state_fieldbody_crlf(char ch)
{
if (ch == '\r')
SET_STATE(state_end_cr);
else if (ch == '\n')
{
log_debug("header " << fieldnamePtr << ": " << fieldbodyPtr);
switch (header.onField(fieldnamePtr, fieldbodyPtr))
{
case OK:
case END: return true;
break;
case FAIL: failedFlag = true;
log_warn("invalid character " << chartoprint(ch) << " in fieldbody");
break;
}
*headerdataPtr = '\0';
return true;
}
else if (std::isspace(ch))
{
// continuation line
checkHeaderspace(1);
*(headerdataPtr - 1) = '\n';
*headerdataPtr++ = ch;
SET_STATE(state_fieldbody);
}
else if (ch >= 33 && ch <= 126)
{
switch (header.onField(fieldnamePtr, fieldbodyPtr))
{
case OK: SET_STATE(state_fieldname);
break;
case FAIL: failedFlag = true;
log_warn("invalid character " << chartoprint(ch) << " in fieldbody");
break;
case END: return true;
break;
}
fieldnamePtr = headerdataPtr;
checkHeaderspace(1);
*headerdataPtr++ = ch;
}
return false;
} | bool Messageheader::Parser::state_fieldbody_crlf(char ch)
{
if (ch == '\r')
SET_STATE(state_end_cr);
else if (ch == '\n')
{
log_debug("header " << fieldnamePtr << ": " << fieldbodyPtr);
switch (header.onField(fieldnamePtr, fieldbodyPtr))
{
case OK:
case END: return true;
break;
case FAIL: failedFlag = true;
log_warn("invalid character " << chartoprint(ch) << " in fieldbody");
break;
}
*headerdataPtr = '\0';
return true;
}
else if (std::isspace(ch))
{
checkHeaderspace(1);
*(headerdataPtr - 1) = '\n';
*headerdataPtr++ = ch;
SET_STATE(state_fieldbody);
}
else if (ch >= 33 && ch <= 126)
{
switch (header.onField(fieldnamePtr, fieldbodyPtr))
{
case OK: SET_STATE(state_fieldname);
break;
case FAIL: failedFlag = true;
log_warn("invalid character " << chartoprint(ch) << " in fieldbody");
break;
case END: return true;
break;
}
fieldnamePtr = headerdataPtr;
checkHeaderspace(1);
*headerdataPtr++ = ch;
}
return false;
} | 1,302 |
0 | static UBool _isAlphaNumericString(const char *s, int32_t len) {
int32_t i;
for (i = 0; i < len; i++) {
if (!ISALPHA(*(s + i)) && !ISNUMERIC(*(s + i))) {
return FALSE;
}
}
return TRUE;
} | static UBool _isAlphaNumericString(const char *s, int32_t len) {
int32_t i;
for (i = 0; i < len; i++) {
if (!ISALPHA(*(s + i)) && !ISNUMERIC(*(s + i))) {
return FALSE;
}
}
return TRUE;
} | 1,305 |
0 | static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int count, BdrvRequestFlags flags) { return bdrv_co_pwrite_zeroes(bs->file->bs, offset, count, flags); } | static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int count, BdrvRequestFlags flags) { return bdrv_co_pwrite_zeroes(bs->file->bs, offset, count, flags); } | 1,306 |
0 | int main()
{
gdImagePtr im1, im2;
FILE *fp;
int size;
fp = gdTestFileOpen2("webp", "bug_double_free.jpg");
gdTestAssert(fp != NULL);
im1 = gdImageCreateFromJpeg(fp);
gdTestAssert(im1 != NULL);
fclose(fp);
im2 = gdImageWebpPtr(im1, &size);
gdTestAssert(im2 == NULL);
gdImageDestroy(im1);
return gdNumFailures();
} | int main()
{
gdImagePtr im1, im2;
FILE *fp;
int size;
fp = gdTestFileOpen2("webp", "bug_double_free.jpg");
gdTestAssert(fp != NULL);
im1 = gdImageCreateFromJpeg(fp);
gdTestAssert(im1 != NULL);
fclose(fp);
im2 = gdImageWebpPtr(im1, &size);
gdTestAssert(im2 == NULL);
gdImageDestroy(im1);
return gdNumFailures();
} | 1,307 |
1 | struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
struct nfs_fh *mntfh)
{
struct nfs_client *parent_client;
struct nfs_server *server, *parent_server;
struct nfs_fattr fattr;
int error;
dprintk("--> nfs4_create_referral_server()\n");
server = nfs_alloc_server();
if (!server)
return ERR_PTR(-ENOMEM);
parent_server = NFS_SB(data->sb);
parent_client = parent_server->nfs_client;
/* Get a client representation.
* Note: NFSv4 always uses TCP, */
error = nfs4_set_client(server, data->hostname, data->addr,
parent_client->cl_ipaddr,
data->authflavor,
parent_server->client->cl_xprt->prot,
parent_client->retrans_timeo,
parent_client->retrans_count);
if (error < 0)
goto error;
/* Initialise the client representation from the parent server */
nfs_server_copy_userdata(server, parent_server);
server->caps |= NFS_CAP_ATOMIC_OPEN;
error = nfs_init_server_rpcclient(server, data->authflavor);
if (error < 0)
goto error;
BUG_ON(!server->nfs_client);
BUG_ON(!server->nfs_client->rpc_ops);
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
/* Probe the root fh to retrieve its FSID and filehandle */
error = nfs4_path_walk(server, mntfh, data->mnt_path);
if (error < 0)
goto error;
/* probe the filesystem info for this server filesystem */
error = nfs_probe_fsinfo(server, mntfh, &fattr);
if (error < 0)
goto error;
dprintk("Referral FSID: %llx:%llx\n",
(unsigned long long) server->fsid.major,
(unsigned long long) server->fsid.minor);
spin_lock(&nfs_client_lock);
list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
list_add_tail(&server->master_link, &nfs_volume_list);
spin_unlock(&nfs_client_lock);
server->mount_time = jiffies;
dprintk("<-- nfs_create_referral_server() = %p\n", server);
return server;
error:
nfs_free_server(server);
dprintk("<-- nfs4_create_referral_server() = error %d\n", error);
return ERR_PTR(error);
} | struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
struct nfs_fh *mntfh)
{
struct nfs_client *parent_client;
struct nfs_server *server, *parent_server;
struct nfs_fattr fattr;
int error;
dprintk("--> nfs4_create_referral_server()\n");
server = nfs_alloc_server();
if (!server)
return ERR_PTR(-ENOMEM);
parent_server = NFS_SB(data->sb);
parent_client = parent_server->nfs_client;
error = nfs4_set_client(server, data->hostname, data->addr,
parent_client->cl_ipaddr,
data->authflavor,
parent_server->client->cl_xprt->prot,
parent_client->retrans_timeo,
parent_client->retrans_count);
if (error < 0)
goto error;
nfs_server_copy_userdata(server, parent_server);
server->caps |= NFS_CAP_ATOMIC_OPEN;
error = nfs_init_server_rpcclient(server, data->authflavor);
if (error < 0)
goto error;
BUG_ON(!server->nfs_client);
BUG_ON(!server->nfs_client->rpc_ops);
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
error = nfs4_path_walk(server, mntfh, data->mnt_path);
if (error < 0)
goto error;
error = nfs_probe_fsinfo(server, mntfh, &fattr);
if (error < 0)
goto error;
dprintk("Referral FSID: %llx:%llx\n",
(unsigned long long) server->fsid.major,
(unsigned long long) server->fsid.minor);
spin_lock(&nfs_client_lock);
list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
list_add_tail(&server->master_link, &nfs_volume_list);
spin_unlock(&nfs_client_lock);
server->mount_time = jiffies;
dprintk("<-- nfs_create_referral_server() = %p\n", server);
return server;
error:
nfs_free_server(server);
dprintk("<-- nfs4_create_referral_server() = error %d\n", error);
return ERR_PTR(error);
} | 1,308 |
0 | void monitor_flush(Monitor *mon) { int i; if (term_outbuf_index > 0) { for (i = 0; i < MAX_MON; i++) if (monitor_hd[i] && monitor_hd[i]->focus == 0) qemu_chr_write(monitor_hd[i], term_outbuf, term_outbuf_index); term_outbuf_index = 0; } } | void monitor_flush(Monitor *mon) { int i; if (term_outbuf_index > 0) { for (i = 0; i < MAX_MON; i++) if (monitor_hd[i] && monitor_hd[i]->focus == 0) qemu_chr_write(monitor_hd[i], term_outbuf, term_outbuf_index); term_outbuf_index = 0; } } | 1,310 |
0 | int main()
{
gdImagePtr im;
FILE *fp;
fp = gdTestFileOpen2("gd2", "too_few_image_data.gd2");
gdTestAssert(fp != NULL);
im = gdImageCreateFromGd2(fp);
gdTestAssert(im == NULL);
fclose(fp);
return gdNumFailures();
} | int main()
{
gdImagePtr im;
FILE *fp;
fp = gdTestFileOpen2("gd2", "too_few_image_data.gd2");
gdTestAssert(fp != NULL);
im = gdImageCreateFromGd2(fp);
gdTestAssert(im == NULL);
fclose(fp);
return gdNumFailures();
} | 1,311 |
0 | void qemu_co_queue_restart_all(CoQueue *queue) { while (qemu_co_queue_next(queue)) { /* Do nothing */ } } | void qemu_co_queue_restart_all(CoQueue *queue) { while (qemu_co_queue_next(queue)) { } } | 1,312 |
0 | static int _gdImageWBMPCtx(gdImagePtr image, int fg, gdIOCtx *out)
{
int x, y, pos;
Wbmp *wbmp;
/* create the WBMP */
if((wbmp = createwbmp(gdImageSX(image), gdImageSY(image), WBMP_WHITE)) == NULL) {
gd_error("Could not create WBMP\n");
return 1;
}
/* fill up the WBMP structure */
pos = 0;
for(y = 0; y < gdImageSY(image); y++) {
for(x = 0; x < gdImageSX(image); x++) {
if(gdImageGetPixel(image, x, y) == fg) {
wbmp->bitmap[pos] = WBMP_BLACK;
}
pos++;
}
}
/* write the WBMP to a gd file descriptor */
if(writewbmp(wbmp, &gd_putout, out)) {
freewbmp(wbmp);
gd_error("Could not save WBMP\n");
return 1;
}
/* des submitted this bugfix: gdFree the memory. */
freewbmp(wbmp);
return 0;
} | static int _gdImageWBMPCtx(gdImagePtr image, int fg, gdIOCtx *out)
{
int x, y, pos;
Wbmp *wbmp;
if((wbmp = createwbmp(gdImageSX(image), gdImageSY(image), WBMP_WHITE)) == NULL) {
gd_error("Could not create WBMP\n");
return 1;
}
pos = 0;
for(y = 0; y < gdImageSY(image); y++) {
for(x = 0; x < gdImageSX(image); x++) {
if(gdImageGetPixel(image, x, y) == fg) {
wbmp->bitmap[pos] = WBMP_BLACK;
}
pos++;
}
}
if(writewbmp(wbmp, &gd_putout, out)) {
freewbmp(wbmp);
gd_error("Could not save WBMP\n");
return 1;
}
freewbmp(wbmp);
return 0;
} | 1,318 |
0 | static void cavs_flush(AVCodecContext *avctx) {
AVSContext *h = avctx->priv_data;
h->got_keyframe = 0;
} | static void cavs_flush(AVCodecContext *avctx) {
AVSContext *h = avctx->priv_data;
h->got_keyframe = 0;
} | 1,319 |
0 | static void nvdimm_dsm_func_read_fit(AcpiNVDIMMState *state, NvdimmDsmIn *in, hwaddr dsm_mem_addr) { NvdimmFitBuffer *fit_buf = &state->fit_buf; NvdimmFuncReadFITIn *read_fit; NvdimmFuncReadFITOut *read_fit_out; GArray *fit; uint32_t read_len = 0, func_ret_status; int size; read_fit = (NvdimmFuncReadFITIn *)in->arg3; le32_to_cpus(&read_fit->offset); qemu_mutex_lock(&fit_buf->lock); fit = fit_buf->fit; nvdimm_debug("Read FIT: offset %#x FIT size %#x Dirty %s.\n", read_fit->offset, fit->len, fit_buf->dirty ? "Yes" : "No"); if (read_fit->offset > fit->len) { func_ret_status = 3 /* Invalid Input Parameters */; goto exit; } /* It is the first time to read FIT. */ if (!read_fit->offset) { fit_buf->dirty = false; } else if (fit_buf->dirty) { /* FIT has been changed during RFIT. */ func_ret_status = 0x100 /* fit changed */; goto exit; } func_ret_status = 0 /* Success */; read_len = MIN(fit->len - read_fit->offset, 4096 - sizeof(NvdimmFuncReadFITOut)); exit: size = sizeof(NvdimmFuncReadFITOut) + read_len; read_fit_out = g_malloc(size); read_fit_out->len = cpu_to_le32(size); read_fit_out->func_ret_status = cpu_to_le32(func_ret_status); memcpy(read_fit_out->fit, fit->data + read_fit->offset, read_len); cpu_physical_memory_write(dsm_mem_addr, read_fit_out, size); g_free(read_fit_out); qemu_mutex_unlock(&fit_buf->lock); } | static void nvdimm_dsm_func_read_fit(AcpiNVDIMMState *state, NvdimmDsmIn *in, hwaddr dsm_mem_addr) { NvdimmFitBuffer *fit_buf = &state->fit_buf; NvdimmFuncReadFITIn *read_fit; NvdimmFuncReadFITOut *read_fit_out; GArray *fit; uint32_t read_len = 0, func_ret_status; int size; read_fit = (NvdimmFuncReadFITIn *)in->arg3; le32_to_cpus(&read_fit->offset); qemu_mutex_lock(&fit_buf->lock); fit = fit_buf->fit; nvdimm_debug("Read FIT: offset %#x FIT size %#x Dirty %s.\n", read_fit->offset, fit->len, fit_buf->dirty ? "Yes" : "No"); if (read_fit->offset > fit->len) { func_ret_status = 3 ; goto exit; } if (!read_fit->offset) { fit_buf->dirty = false; } else if (fit_buf->dirty) { func_ret_status = 0x100 ; goto exit; } func_ret_status = 0 ; read_len = MIN(fit->len - read_fit->offset, 4096 - sizeof(NvdimmFuncReadFITOut)); exit: size = sizeof(NvdimmFuncReadFITOut) + read_len; read_fit_out = g_malloc(size); read_fit_out->len = cpu_to_le32(size); read_fit_out->func_ret_status = cpu_to_le32(func_ret_status); memcpy(read_fit_out->fit, fit->data + read_fit->offset, read_len); cpu_physical_memory_write(dsm_mem_addr, read_fit_out, size); g_free(read_fit_out); qemu_mutex_unlock(&fit_buf->lock); } | 1,320 |
1 | int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_device *dev = ic->i_cm_id->device;
struct rds_ib_send_work *send = NULL;
struct rds_ib_send_work *first;
struct rds_ib_send_work *prev;
struct ib_send_wr *failed_wr;
struct scatterlist *scat;
u32 pos;
u32 i;
u32 work_alloc;
u32 credit_alloc = 0;
u32 posted;
u32 adv_credits = 0;
int send_flags = 0;
int bytes_sent = 0;
int ret;
int flow_controlled = 0;
int nr_sig = 0;
BUG_ON(off % RDS_FRAG_SIZE);
BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
/* Do not send cong updates to IB loopback */
if (conn->c_loopback
&& rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
}
/* FIXME we may overallocate here */
if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
i = 1;
else
i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
rds_ib_stats_inc(s_ib_tx_ring_full);
ret = -ENOMEM;
goto out;
}
if (ic->i_flowctl) {
credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
adv_credits += posted;
if (credit_alloc < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
work_alloc = credit_alloc;
flow_controlled = 1;
}
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
rds_ib_stats_inc(s_ib_tx_throttle);
ret = -ENOMEM;
goto out;
}
}
/* map the message the first time we see it */
if (!ic->i_data_op) {
if (rm->data.op_nents) {
rm->data.op_count = ib_dma_map_sg(dev,
rm->data.op_sg,
rm->data.op_nents,
DMA_TO_DEVICE);
rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
if (rm->data.op_count == 0) {
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ret = -ENOMEM; /* XXX ? */
goto out;
}
} else {
rm->data.op_count = 0;
}
rds_message_addref(rm);
ic->i_data_op = &rm->data;
/* Finalize the header */
if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
/* If it has a RDMA op, tell the peer we did it. This is
* used by the peer to release use-once RDMA MRs. */
if (rm->rdma.op_active) {
struct rds_ext_header_rdma ext_hdr;
ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
}
if (rm->m_rdma_cookie) {
rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
rds_rdma_cookie_key(rm->m_rdma_cookie),
rds_rdma_cookie_offset(rm->m_rdma_cookie));
}
/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
* we should not do this unless we have a chance of at least
* sticking the header into the send ring. Which is why we
* should call rds_ib_ring_alloc first. */
rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
rds_message_make_checksum(&rm->m_inc.i_hdr);
/*
* Update adv_credits since we reset the ACK_REQUIRED bit.
*/
if (ic->i_flowctl) {
rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
adv_credits += posted;
BUG_ON(adv_credits > 255);
}
}
/* Sometimes you want to put a fence between an RDMA
* READ and the following SEND.
* We could either do this all the time
* or when requested by the user. Right now, we let
* the application choose.
*/
if (rm->rdma.op_active && rm->rdma.op_fence)
send_flags = IB_SEND_FENCE;
/* Each frag gets a header. Msgs may be 0 bytes */
send = &ic->i_sends[pos];
first = send;
prev = NULL;
scat = &ic->i_data_op->op_sg[sg];
i = 0;
do {
unsigned int len = 0;
/* Set up the header */
send->s_wr.send_flags = send_flags;
send->s_wr.opcode = IB_WR_SEND;
send->s_wr.num_sge = 1;
send->s_wr.next = NULL;
send->s_queued = jiffies;
send->s_op = NULL;
send->s_sge[0].addr = ic->i_send_hdrs_dma
+ (pos * sizeof(struct rds_header));
send->s_sge[0].length = sizeof(struct rds_header);
memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
/* Set up the data, if present */
if (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]) {
len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
send->s_wr.num_sge = 2;
send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
send->s_sge[1].length = len;
bytes_sent += len;
off += len;
if (off == ib_sg_dma_len(dev, scat)) {
scat++;
off = 0;
}
}
rds_ib_set_wr_signal_state(ic, send, 0);
/*
* Always signal the last one if we're stopping due to flow control.
*/
if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
if (send->s_wr.send_flags & IB_SEND_SIGNALED)
nr_sig++;
rdsdebug("send %p wr %p num_sge %u next %p\n", send,
&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
if (ic->i_flowctl && adv_credits) {
struct rds_header *hdr = &ic->i_send_hdrs[pos];
/* add credit and redo the header checksum */
hdr->h_credit = adv_credits;
rds_message_make_checksum(hdr);
adv_credits = 0;
rds_ib_stats_inc(s_ib_tx_credit_updates);
}
if (prev)
prev->s_wr.next = &send->s_wr;
prev = send;
pos = (pos + 1) % ic->i_send_ring.w_nr;
send = &ic->i_sends[pos];
i++;
} while (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]);
/* Account the RDS header in the number of bytes we sent, but just once.
* The caller has no concept of fragmentation. */
if (hdr_off == 0)
bytes_sent += sizeof(struct rds_header);
/* if we finished the message then send completion owns it */
if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_op = ic->i_data_op;
prev->s_wr.send_flags |= IB_SEND_SOLICITED;
ic->i_data_op = NULL;
}
/* Put back wrs & credits we didn't use */
if (i < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
work_alloc = i;
}
if (ic->i_flowctl && i < credit_alloc)
rds_ib_send_add_credits(conn, credit_alloc - i);
if (nr_sig)
atomic_add(nr_sig, &ic->i_signaled_sends);
/* XXX need to worry about failed_wr and partial sends. */
failed_wr = &first->s_wr;
ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
first, &first->s_wr, ret, failed_wr);
BUG_ON(failed_wr != &first->s_wr);
if (ret) {
printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_sub_signaled(ic, nr_sig);
if (prev->s_op) {
ic->i_data_op = prev->s_op;
prev->s_op = NULL;
}
rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
goto out;
}
ret = bytes_sent;
out:
BUG_ON(adv_credits);
return ret;
} | int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_device *dev = ic->i_cm_id->device;
struct rds_ib_send_work *send = NULL;
struct rds_ib_send_work *first;
struct rds_ib_send_work *prev;
struct ib_send_wr *failed_wr;
struct scatterlist *scat;
u32 pos;
u32 i;
u32 work_alloc;
u32 credit_alloc = 0;
u32 posted;
u32 adv_credits = 0;
int send_flags = 0;
int bytes_sent = 0;
int ret;
int flow_controlled = 0;
int nr_sig = 0;
BUG_ON(off % RDS_FRAG_SIZE);
BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
if (conn->c_loopback
&& rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
}
if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
i = 1;
else
i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
rds_ib_stats_inc(s_ib_tx_ring_full);
ret = -ENOMEM;
goto out;
}
if (ic->i_flowctl) {
credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
adv_credits += posted;
if (credit_alloc < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
work_alloc = credit_alloc;
flow_controlled = 1;
}
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
rds_ib_stats_inc(s_ib_tx_throttle);
ret = -ENOMEM;
goto out;
}
}
if (!ic->i_data_op) {
if (rm->data.op_nents) {
rm->data.op_count = ib_dma_map_sg(dev,
rm->data.op_sg,
rm->data.op_nents,
DMA_TO_DEVICE);
rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
if (rm->data.op_count == 0) {
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ret = -ENOMEM;
goto out;
}
} else {
rm->data.op_count = 0;
}
rds_message_addref(rm);
ic->i_data_op = &rm->data;
if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
if (rm->rdma.op_active) {
struct rds_ext_header_rdma ext_hdr;
ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
}
if (rm->m_rdma_cookie) {
rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
rds_rdma_cookie_key(rm->m_rdma_cookie),
rds_rdma_cookie_offset(rm->m_rdma_cookie));
}
rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
rds_message_make_checksum(&rm->m_inc.i_hdr);
if (ic->i_flowctl) {
rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
adv_credits += posted;
BUG_ON(adv_credits > 255);
}
}
if (rm->rdma.op_active && rm->rdma.op_fence)
send_flags = IB_SEND_FENCE;
send = &ic->i_sends[pos];
first = send;
prev = NULL;
scat = &ic->i_data_op->op_sg[sg];
i = 0;
do {
unsigned int len = 0;
send->s_wr.send_flags = send_flags;
send->s_wr.opcode = IB_WR_SEND;
send->s_wr.num_sge = 1;
send->s_wr.next = NULL;
send->s_queued = jiffies;
send->s_op = NULL;
send->s_sge[0].addr = ic->i_send_hdrs_dma
+ (pos * sizeof(struct rds_header));
send->s_sge[0].length = sizeof(struct rds_header);
memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
if (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]) {
len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
send->s_wr.num_sge = 2;
send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
send->s_sge[1].length = len;
bytes_sent += len;
off += len;
if (off == ib_sg_dma_len(dev, scat)) {
scat++;
off = 0;
}
}
rds_ib_set_wr_signal_state(ic, send, 0);
if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
if (send->s_wr.send_flags & IB_SEND_SIGNALED)
nr_sig++;
rdsdebug("send %p wr %p num_sge %u next %p\n", send,
&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
if (ic->i_flowctl && adv_credits) {
struct rds_header *hdr = &ic->i_send_hdrs[pos];
hdr->h_credit = adv_credits;
rds_message_make_checksum(hdr);
adv_credits = 0;
rds_ib_stats_inc(s_ib_tx_credit_updates);
}
if (prev)
prev->s_wr.next = &send->s_wr;
prev = send;
pos = (pos + 1) % ic->i_send_ring.w_nr;
send = &ic->i_sends[pos];
i++;
} while (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]);
if (hdr_off == 0)
bytes_sent += sizeof(struct rds_header);
if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_op = ic->i_data_op;
prev->s_wr.send_flags |= IB_SEND_SOLICITED;
ic->i_data_op = NULL;
}
if (i < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
work_alloc = i;
}
if (ic->i_flowctl && i < credit_alloc)
rds_ib_send_add_credits(conn, credit_alloc - i);
if (nr_sig)
atomic_add(nr_sig, &ic->i_signaled_sends);
failed_wr = &first->s_wr;
ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
first, &first->s_wr, ret, failed_wr);
BUG_ON(failed_wr != &first->s_wr);
if (ret) {
printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_sub_signaled(ic, nr_sig);
if (prev->s_op) {
ic->i_data_op = prev->s_op;
prev->s_op = NULL;
}
rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
goto out;
}
ret = bytes_sent;
out:
BUG_ON(adv_credits);
return ret;
} | 1,321 |
0 | int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_device *dev = ic->i_cm_id->device;
struct rds_ib_send_work *send = NULL;
struct rds_ib_send_work *first;
struct rds_ib_send_work *prev;
struct ib_send_wr *failed_wr;
struct scatterlist *scat;
u32 pos;
u32 i;
u32 work_alloc;
u32 credit_alloc = 0;
u32 posted;
u32 adv_credits = 0;
int send_flags = 0;
int bytes_sent = 0;
int ret;
int flow_controlled = 0;
int nr_sig = 0;
BUG_ON(off % RDS_FRAG_SIZE);
BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
/* Do not send cong updates to IB loopback */
if (conn->c_loopback
&& rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
scat = &rm->data.op_sg[sg];
ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
return ret;
}
/* FIXME we may overallocate here */
if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
i = 1;
else
i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
rds_ib_stats_inc(s_ib_tx_ring_full);
ret = -ENOMEM;
goto out;
}
if (ic->i_flowctl) {
credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
adv_credits += posted;
if (credit_alloc < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
work_alloc = credit_alloc;
flow_controlled = 1;
}
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
rds_ib_stats_inc(s_ib_tx_throttle);
ret = -ENOMEM;
goto out;
}
}
/* map the message the first time we see it */
if (!ic->i_data_op) {
if (rm->data.op_nents) {
rm->data.op_count = ib_dma_map_sg(dev,
rm->data.op_sg,
rm->data.op_nents,
DMA_TO_DEVICE);
rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
if (rm->data.op_count == 0) {
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ret = -ENOMEM; /* XXX ? */
goto out;
}
} else {
rm->data.op_count = 0;
}
rds_message_addref(rm);
ic->i_data_op = &rm->data;
/* Finalize the header */
if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
/* If it has a RDMA op, tell the peer we did it. This is
* used by the peer to release use-once RDMA MRs. */
if (rm->rdma.op_active) {
struct rds_ext_header_rdma ext_hdr;
ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
}
if (rm->m_rdma_cookie) {
rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
rds_rdma_cookie_key(rm->m_rdma_cookie),
rds_rdma_cookie_offset(rm->m_rdma_cookie));
}
/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
* we should not do this unless we have a chance of at least
* sticking the header into the send ring. Which is why we
* should call rds_ib_ring_alloc first. */
rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
rds_message_make_checksum(&rm->m_inc.i_hdr);
/*
* Update adv_credits since we reset the ACK_REQUIRED bit.
*/
if (ic->i_flowctl) {
rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
adv_credits += posted;
BUG_ON(adv_credits > 255);
}
}
/* Sometimes you want to put a fence between an RDMA
* READ and the following SEND.
* We could either do this all the time
* or when requested by the user. Right now, we let
* the application choose.
*/
if (rm->rdma.op_active && rm->rdma.op_fence)
send_flags = IB_SEND_FENCE;
/* Each frag gets a header. Msgs may be 0 bytes */
send = &ic->i_sends[pos];
first = send;
prev = NULL;
scat = &ic->i_data_op->op_sg[sg];
i = 0;
do {
unsigned int len = 0;
/* Set up the header */
send->s_wr.send_flags = send_flags;
send->s_wr.opcode = IB_WR_SEND;
send->s_wr.num_sge = 1;
send->s_wr.next = NULL;
send->s_queued = jiffies;
send->s_op = NULL;
send->s_sge[0].addr = ic->i_send_hdrs_dma
+ (pos * sizeof(struct rds_header));
send->s_sge[0].length = sizeof(struct rds_header);
memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
/* Set up the data, if present */
if (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]) {
len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
send->s_wr.num_sge = 2;
send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
send->s_sge[1].length = len;
bytes_sent += len;
off += len;
if (off == ib_sg_dma_len(dev, scat)) {
scat++;
off = 0;
}
}
rds_ib_set_wr_signal_state(ic, send, 0);
/*
* Always signal the last one if we're stopping due to flow control.
*/
if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
if (send->s_wr.send_flags & IB_SEND_SIGNALED)
nr_sig++;
rdsdebug("send %p wr %p num_sge %u next %p\n", send,
&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
if (ic->i_flowctl && adv_credits) {
struct rds_header *hdr = &ic->i_send_hdrs[pos];
/* add credit and redo the header checksum */
hdr->h_credit = adv_credits;
rds_message_make_checksum(hdr);
adv_credits = 0;
rds_ib_stats_inc(s_ib_tx_credit_updates);
}
if (prev)
prev->s_wr.next = &send->s_wr;
prev = send;
pos = (pos + 1) % ic->i_send_ring.w_nr;
send = &ic->i_sends[pos];
i++;
} while (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]);
/* Account the RDS header in the number of bytes we sent, but just once.
* The caller has no concept of fragmentation. */
if (hdr_off == 0)
bytes_sent += sizeof(struct rds_header);
/* if we finished the message then send completion owns it */
if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_op = ic->i_data_op;
prev->s_wr.send_flags |= IB_SEND_SOLICITED;
ic->i_data_op = NULL;
}
/* Put back wrs & credits we didn't use */
if (i < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
work_alloc = i;
}
if (ic->i_flowctl && i < credit_alloc)
rds_ib_send_add_credits(conn, credit_alloc - i);
if (nr_sig)
atomic_add(nr_sig, &ic->i_signaled_sends);
/* XXX need to worry about failed_wr and partial sends. */
failed_wr = &first->s_wr;
ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
first, &first->s_wr, ret, failed_wr);
BUG_ON(failed_wr != &first->s_wr);
if (ret) {
printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_sub_signaled(ic, nr_sig);
if (prev->s_op) {
ic->i_data_op = prev->s_op;
prev->s_op = NULL;
}
rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
goto out;
}
ret = bytes_sent;
out:
BUG_ON(adv_credits);
return ret;
} | int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_device *dev = ic->i_cm_id->device;
struct rds_ib_send_work *send = NULL;
struct rds_ib_send_work *first;
struct rds_ib_send_work *prev;
struct ib_send_wr *failed_wr;
struct scatterlist *scat;
u32 pos;
u32 i;
u32 work_alloc;
u32 credit_alloc = 0;
u32 posted;
u32 adv_credits = 0;
int send_flags = 0;
int bytes_sent = 0;
int ret;
int flow_controlled = 0;
int nr_sig = 0;
BUG_ON(off % RDS_FRAG_SIZE);
BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
if (conn->c_loopback
&& rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
scat = &rm->data.op_sg[sg];
ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
return ret;
}
if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
i = 1;
else
i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
rds_ib_stats_inc(s_ib_tx_ring_full);
ret = -ENOMEM;
goto out;
}
if (ic->i_flowctl) {
credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
adv_credits += posted;
if (credit_alloc < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
work_alloc = credit_alloc;
flow_controlled = 1;
}
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
rds_ib_stats_inc(s_ib_tx_throttle);
ret = -ENOMEM;
goto out;
}
}
if (!ic->i_data_op) {
if (rm->data.op_nents) {
rm->data.op_count = ib_dma_map_sg(dev,
rm->data.op_sg,
rm->data.op_nents,
DMA_TO_DEVICE);
rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
if (rm->data.op_count == 0) {
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ret = -ENOMEM;
goto out;
}
} else {
rm->data.op_count = 0;
}
rds_message_addref(rm);
ic->i_data_op = &rm->data;
if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
if (rm->rdma.op_active) {
struct rds_ext_header_rdma ext_hdr;
ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
}
if (rm->m_rdma_cookie) {
rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
rds_rdma_cookie_key(rm->m_rdma_cookie),
rds_rdma_cookie_offset(rm->m_rdma_cookie));
}
rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
rds_message_make_checksum(&rm->m_inc.i_hdr);
if (ic->i_flowctl) {
rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
adv_credits += posted;
BUG_ON(adv_credits > 255);
}
}
if (rm->rdma.op_active && rm->rdma.op_fence)
send_flags = IB_SEND_FENCE;
send = &ic->i_sends[pos];
first = send;
prev = NULL;
scat = &ic->i_data_op->op_sg[sg];
i = 0;
do {
unsigned int len = 0;
send->s_wr.send_flags = send_flags;
send->s_wr.opcode = IB_WR_SEND;
send->s_wr.num_sge = 1;
send->s_wr.next = NULL;
send->s_queued = jiffies;
send->s_op = NULL;
send->s_sge[0].addr = ic->i_send_hdrs_dma
+ (pos * sizeof(struct rds_header));
send->s_sge[0].length = sizeof(struct rds_header);
memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
if (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]) {
len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
send->s_wr.num_sge = 2;
send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
send->s_sge[1].length = len;
bytes_sent += len;
off += len;
if (off == ib_sg_dma_len(dev, scat)) {
scat++;
off = 0;
}
}
rds_ib_set_wr_signal_state(ic, send, 0);
if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
if (send->s_wr.send_flags & IB_SEND_SIGNALED)
nr_sig++;
rdsdebug("send %p wr %p num_sge %u next %p\n", send,
&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
if (ic->i_flowctl && adv_credits) {
struct rds_header *hdr = &ic->i_send_hdrs[pos];
hdr->h_credit = adv_credits;
rds_message_make_checksum(hdr);
adv_credits = 0;
rds_ib_stats_inc(s_ib_tx_credit_updates);
}
if (prev)
prev->s_wr.next = &send->s_wr;
prev = send;
pos = (pos + 1) % ic->i_send_ring.w_nr;
send = &ic->i_sends[pos];
i++;
} while (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]);
if (hdr_off == 0)
bytes_sent += sizeof(struct rds_header);
if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_op = ic->i_data_op;
prev->s_wr.send_flags |= IB_SEND_SOLICITED;
ic->i_data_op = NULL;
}
if (i < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
work_alloc = i;
}
if (ic->i_flowctl && i < credit_alloc)
rds_ib_send_add_credits(conn, credit_alloc - i);
if (nr_sig)
atomic_add(nr_sig, &ic->i_signaled_sends);
failed_wr = &first->s_wr;
ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
first, &first->s_wr, ret, failed_wr);
BUG_ON(failed_wr != &first->s_wr);
if (ret) {
printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_sub_signaled(ic, nr_sig);
if (prev->s_op) {
ic->i_data_op = prev->s_op;
prev->s_op = NULL;
}
rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
goto out;
}
ret = bytes_sent;
out:
BUG_ON(adv_credits);
return ret;
} | 1,322 |
0 | static void tftp_send_error(struct tftp_session *spt, uint16_t errorcode, const char *msg, struct tftp_t *recv_tp) { struct sockaddr_in saddr, daddr; struct mbuf *m; struct tftp_t *tp; m = m_get(spt->slirp); if (!m) { goto out; } memset(m->m_data, 0, m->m_size); m->m_data += IF_MAXLINKHDR; tp = (void *)m->m_data; m->m_data += sizeof(struct udpiphdr); tp->tp_op = htons(TFTP_ERROR); tp->x.tp_error.tp_error_code = htons(errorcode); pstrcpy((char *)tp->x.tp_error.tp_msg, sizeof(tp->x.tp_error.tp_msg), msg); saddr.sin_addr = recv_tp->ip.ip_dst; saddr.sin_port = recv_tp->udp.uh_dport; daddr.sin_addr = spt->client_ip; daddr.sin_port = spt->client_port; m->m_len = sizeof(struct tftp_t) - 514 + 3 + strlen(msg) - sizeof(struct ip) - sizeof(struct udphdr); udp_output2(NULL, m, &saddr, &daddr, IPTOS_LOWDELAY); out: tftp_session_terminate(spt); } | static void tftp_send_error(struct tftp_session *spt, uint16_t errorcode, const char *msg, struct tftp_t *recv_tp) { struct sockaddr_in saddr, daddr; struct mbuf *m; struct tftp_t *tp; m = m_get(spt->slirp); if (!m) { goto out; } memset(m->m_data, 0, m->m_size); m->m_data += IF_MAXLINKHDR; tp = (void *)m->m_data; m->m_data += sizeof(struct udpiphdr); tp->tp_op = htons(TFTP_ERROR); tp->x.tp_error.tp_error_code = htons(errorcode); pstrcpy((char *)tp->x.tp_error.tp_msg, sizeof(tp->x.tp_error.tp_msg), msg); saddr.sin_addr = recv_tp->ip.ip_dst; saddr.sin_port = recv_tp->udp.uh_dport; daddr.sin_addr = spt->client_ip; daddr.sin_port = spt->client_port; m->m_len = sizeof(struct tftp_t) - 514 + 3 + strlen(msg) - sizeof(struct ip) - sizeof(struct udphdr); udp_output2(NULL, m, &saddr, &daddr, IPTOS_LOWDELAY); out: tftp_session_terminate(spt); } | 1,323 |
0 | BGD_DECLARE(void) gdImageWBMPCtx(gdImagePtr image, int fg, gdIOCtx *out)
{
_gdImageWBMPCtx(image, fg, out);
} | BGD_DECLARE(void) gdImageWBMPCtx(gdImagePtr image, int fg, gdIOCtx *out)
{
_gdImageWBMPCtx(image, fg, out);
} | 1,324 |
1 | static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_data *data)
{
struct nfs_client *clp;
int error, nfsvers = 2;
dprintk("--> nfs_init_server()\n");
#ifdef CONFIG_NFS_V3
if (data->flags & NFS_MOUNT_VER3)
nfsvers = 3;
#endif
/* Allocate or find a client reference we can use */
clp = nfs_get_client(data->hostname, &data->addr, nfsvers);
if (IS_ERR(clp)) {
dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
return PTR_ERR(clp);
}
error = nfs_init_client(clp, data);
if (error < 0)
goto error;
server->nfs_client = clp;
/* Initialise the client representation from the mount data */
server->flags = data->flags & NFS_MOUNT_FLAGMASK;
if (data->rsize)
server->rsize = nfs_block_size(data->rsize, NULL);
if (data->wsize)
server->wsize = nfs_block_size(data->wsize, NULL);
server->acregmin = data->acregmin * HZ;
server->acregmax = data->acregmax * HZ;
server->acdirmin = data->acdirmin * HZ;
server->acdirmax = data->acdirmax * HZ;
/* Start lockd here, before we might error out */
error = nfs_start_lockd(server);
if (error < 0)
goto error;
error = nfs_init_server_rpcclient(server, data->pseudoflavor);
if (error < 0)
goto error;
server->namelen = data->namlen;
/* Create a client RPC handle for the NFSv3 ACL management interface */
nfs_init_server_aclclient(server);
if (clp->cl_nfsversion == 3) {
if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
server->namelen = NFS3_MAXNAMLEN;
if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
server->caps |= NFS_CAP_READDIRPLUS;
} else {
if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
server->namelen = NFS2_MAXNAMLEN;
}
dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp);
return 0;
error:
server->nfs_client = NULL;
nfs_put_client(clp);
dprintk("<-- nfs_init_server() = xerror %d\n", error);
return error;
} | static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_data *data)
{
struct nfs_client *clp;
int error, nfsvers = 2;
dprintk("--> nfs_init_server()\n");
#ifdef CONFIG_NFS_V3
if (data->flags & NFS_MOUNT_VER3)
nfsvers = 3;
#endif
clp = nfs_get_client(data->hostname, &data->addr, nfsvers);
if (IS_ERR(clp)) {
dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
return PTR_ERR(clp);
}
error = nfs_init_client(clp, data);
if (error < 0)
goto error;
server->nfs_client = clp;
server->flags = data->flags & NFS_MOUNT_FLAGMASK;
if (data->rsize)
server->rsize = nfs_block_size(data->rsize, NULL);
if (data->wsize)
server->wsize = nfs_block_size(data->wsize, NULL);
server->acregmin = data->acregmin * HZ;
server->acregmax = data->acregmax * HZ;
server->acdirmin = data->acdirmin * HZ;
server->acdirmax = data->acdirmax * HZ;
error = nfs_start_lockd(server);
if (error < 0)
goto error;
error = nfs_init_server_rpcclient(server, data->pseudoflavor);
if (error < 0)
goto error;
server->namelen = data->namlen;
nfs_init_server_aclclient(server);
if (clp->cl_nfsversion == 3) {
if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
server->namelen = NFS3_MAXNAMLEN;
if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
server->caps |= NFS_CAP_READDIRPLUS;
} else {
if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
server->namelen = NFS2_MAXNAMLEN;
}
dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp);
return 0;
error:
server->nfs_client = NULL;
nfs_put_client(clp);
dprintk("<-- nfs_init_server() = xerror %d\n", error);
return error;
} | 1,325 |
0 | static int tls_process_cke_gost ( SSL * s , PACKET * pkt , int * al ) {
# ifndef OPENSSL_NO_GOST EVP_PKEY_CTX * pkey_ctx ;
EVP_PKEY * client_pub_pkey = NULL , * pk = NULL ;
unsigned char premaster_secret [ 32 ] ;
const unsigned char * start ;
size_t outlen = 32 , inlen ;
unsigned long alg_a ;
int Ttag , Tclass ;
long Tlen ;
long sess_key_len ;
const unsigned char * data ;
int ret = 0 ;
alg_a = s -> s3 -> tmp . new_cipher -> algorithm_auth ;
if ( alg_a & SSL_aGOST12 ) {
pk = s -> cert -> pkeys [ SSL_PKEY_GOST12_512 ] . privatekey ;
if ( pk == NULL ) {
pk = s -> cert -> pkeys [ SSL_PKEY_GOST12_256 ] . privatekey ;
}
if ( pk == NULL ) {
pk = s -> cert -> pkeys [ SSL_PKEY_GOST01 ] . privatekey ;
}
}
else if ( alg_a & SSL_aGOST01 ) {
pk = s -> cert -> pkeys [ SSL_PKEY_GOST01 ] . privatekey ;
}
pkey_ctx = EVP_PKEY_CTX_new ( pk , NULL ) ;
if ( pkey_ctx == NULL ) {
* al = SSL_AD_INTERNAL_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , ERR_R_MALLOC_FAILURE ) ;
return 0 ;
}
if ( EVP_PKEY_decrypt_init ( pkey_ctx ) <= 0 ) {
* al = SSL_AD_INTERNAL_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , ERR_R_INTERNAL_ERROR ) ;
return 0 ;
}
client_pub_pkey = X509_get0_pubkey ( s -> session -> peer ) ;
if ( client_pub_pkey ) {
if ( EVP_PKEY_derive_set_peer ( pkey_ctx , client_pub_pkey ) <= 0 ) ERR_clear_error ( ) ;
}
sess_key_len = PACKET_remaining ( pkt ) ;
if ( ! PACKET_get_bytes ( pkt , & data , sess_key_len ) ) {
* al = SSL_AD_INTERNAL_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , ERR_R_INTERNAL_ERROR ) ;
goto err ;
}
if ( ASN1_get_object ( ( const unsigned char * * ) & data , & Tlen , & Ttag , & Tclass , sess_key_len ) != V_ASN1_CONSTRUCTED || Ttag != V_ASN1_SEQUENCE || Tclass != V_ASN1_UNIVERSAL ) {
* al = SSL_AD_DECODE_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , SSL_R_DECRYPTION_FAILED ) ;
goto err ;
}
start = data ;
inlen = Tlen ;
if ( EVP_PKEY_decrypt ( pkey_ctx , premaster_secret , & outlen , start , inlen ) <= 0 ) {
* al = SSL_AD_DECODE_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , SSL_R_DECRYPTION_FAILED ) ;
goto err ;
}
if ( ! ssl_generate_master_secret ( s , premaster_secret , sizeof ( premaster_secret ) , 0 ) ) {
* al = SSL_AD_INTERNAL_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , ERR_R_INTERNAL_ERROR ) ;
goto err ;
}
if ( EVP_PKEY_CTX_ctrl ( pkey_ctx , - 1 , - 1 , EVP_PKEY_CTRL_PEER_KEY , 2 , NULL ) > 0 ) s -> statem . no_cert_verify = 1 ;
ret = 1 ;
err : EVP_PKEY_CTX_free ( pkey_ctx ) ;
return ret ;
# else * al = SSL_AD_INTERNAL_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , ERR_R_INTERNAL_ERROR ) ;
return 0 ;
# endif } | static int tls_process_cke_gost ( SSL * s , PACKET * pkt , int * al ) {
# ifndef OPENSSL_NO_GOST EVP_PKEY_CTX * pkey_ctx ;
EVP_PKEY * client_pub_pkey = NULL , * pk = NULL ;
unsigned char premaster_secret [ 32 ] ;
const unsigned char * start ;
size_t outlen = 32 , inlen ;
unsigned long alg_a ;
int Ttag , Tclass ;
long Tlen ;
long sess_key_len ;
const unsigned char * data ;
int ret = 0 ;
alg_a = s -> s3 -> tmp . new_cipher -> algorithm_auth ;
if ( alg_a & SSL_aGOST12 ) {
pk = s -> cert -> pkeys [ SSL_PKEY_GOST12_512 ] . privatekey ;
if ( pk == NULL ) {
pk = s -> cert -> pkeys [ SSL_PKEY_GOST12_256 ] . privatekey ;
}
if ( pk == NULL ) {
pk = s -> cert -> pkeys [ SSL_PKEY_GOST01 ] . privatekey ;
}
}
else if ( alg_a & SSL_aGOST01 ) {
pk = s -> cert -> pkeys [ SSL_PKEY_GOST01 ] . privatekey ;
}
pkey_ctx = EVP_PKEY_CTX_new ( pk , NULL ) ;
if ( pkey_ctx == NULL ) {
* al = SSL_AD_INTERNAL_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , ERR_R_MALLOC_FAILURE ) ;
return 0 ;
}
if ( EVP_PKEY_decrypt_init ( pkey_ctx ) <= 0 ) {
* al = SSL_AD_INTERNAL_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , ERR_R_INTERNAL_ERROR ) ;
return 0 ;
}
client_pub_pkey = X509_get0_pubkey ( s -> session -> peer ) ;
if ( client_pub_pkey ) {
if ( EVP_PKEY_derive_set_peer ( pkey_ctx , client_pub_pkey ) <= 0 ) ERR_clear_error ( ) ;
}
sess_key_len = PACKET_remaining ( pkt ) ;
if ( ! PACKET_get_bytes ( pkt , & data , sess_key_len ) ) {
* al = SSL_AD_INTERNAL_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , ERR_R_INTERNAL_ERROR ) ;
goto err ;
}
if ( ASN1_get_object ( ( const unsigned char * * ) & data , & Tlen , & Ttag , & Tclass , sess_key_len ) != V_ASN1_CONSTRUCTED || Ttag != V_ASN1_SEQUENCE || Tclass != V_ASN1_UNIVERSAL ) {
* al = SSL_AD_DECODE_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , SSL_R_DECRYPTION_FAILED ) ;
goto err ;
}
start = data ;
inlen = Tlen ;
if ( EVP_PKEY_decrypt ( pkey_ctx , premaster_secret , & outlen , start , inlen ) <= 0 ) {
* al = SSL_AD_DECODE_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , SSL_R_DECRYPTION_FAILED ) ;
goto err ;
}
if ( ! ssl_generate_master_secret ( s , premaster_secret , sizeof ( premaster_secret ) , 0 ) ) {
* al = SSL_AD_INTERNAL_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , ERR_R_INTERNAL_ERROR ) ;
goto err ;
}
if ( EVP_PKEY_CTX_ctrl ( pkey_ctx , - 1 , - 1 , EVP_PKEY_CTRL_PEER_KEY , 2 , NULL ) > 0 ) s -> statem . no_cert_verify = 1 ;
ret = 1 ;
err : EVP_PKEY_CTX_free ( pkey_ctx ) ;
return ret ;
# else * al = SSL_AD_INTERNAL_ERROR ;
SSLerr ( SSL_F_TLS_PROCESS_CKE_GOST , ERR_R_INTERNAL_ERROR ) ;
return 0 ;
# endif } | 1,326 |
1 | int udev_monitor_enable_receiving(struct udev_monitor *udev_monitor)
{
int err;
const int on = 1;
if (udev_monitor->snl.nl_family != 0) {
err = bind(udev_monitor->sock,
(struct sockaddr *)&udev_monitor->snl, sizeof(struct sockaddr_nl));
if (err < 0) {
err(udev_monitor->udev, "bind failed: %m\n");
return err;
}
dbg(udev_monitor->udev, "monitor %p listening on netlink\n", udev_monitor);
} else if (udev_monitor->sun.sun_family != 0) {
err = bind(udev_monitor->sock,
(struct sockaddr *)&udev_monitor->sun, udev_monitor->addrlen);
if (err < 0) {
err(udev_monitor->udev, "bind failed: %m\n");
return err;
}
/* enable receiving of the sender credentials */
setsockopt(udev_monitor->sock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
dbg(udev_monitor->udev, "monitor %p listening on socket\n", udev_monitor);
}
return 0;
} | int udev_monitor_enable_receiving(struct udev_monitor *udev_monitor)
{
int err;
const int on = 1;
if (udev_monitor->snl.nl_family != 0) {
err = bind(udev_monitor->sock,
(struct sockaddr *)&udev_monitor->snl, sizeof(struct sockaddr_nl));
if (err < 0) {
err(udev_monitor->udev, "bind failed: %m\n");
return err;
}
dbg(udev_monitor->udev, "monitor %p listening on netlink\n", udev_monitor);
} else if (udev_monitor->sun.sun_family != 0) {
err = bind(udev_monitor->sock,
(struct sockaddr *)&udev_monitor->sun, udev_monitor->addrlen);
if (err < 0) {
err(udev_monitor->udev, "bind failed: %m\n");
return err;
}
setsockopt(udev_monitor->sock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
dbg(udev_monitor->udev, "monitor %p listening on socket\n", udev_monitor);
}
return 0;
} | 1,327 |
1 | BGD_DECLARE(void) gdImageWBMPCtx(gdImagePtr image, int fg, gdIOCtx *out)
{
int x, y, pos;
Wbmp *wbmp;
/* create the WBMP */
if((wbmp = createwbmp(gdImageSX(image), gdImageSY(image), WBMP_WHITE)) == NULL) {
gd_error("Could not create WBMP\n");
return;
}
/* fill up the WBMP structure */
pos = 0;
for(y = 0; y < gdImageSY(image); y++) {
for(x = 0; x < gdImageSX(image); x++) {
if(gdImageGetPixel(image, x, y) == fg) {
wbmp->bitmap[pos] = WBMP_BLACK;
}
pos++;
}
}
/* write the WBMP to a gd file descriptor */
if(writewbmp(wbmp, &gd_putout, out)) {
gd_error("Could not save WBMP\n");
}
/* des submitted this bugfix: gdFree the memory. */
freewbmp(wbmp);
} | BGD_DECLARE(void) gdImageWBMPCtx(gdImagePtr image, int fg, gdIOCtx *out)
{
int x, y, pos;
Wbmp *wbmp;
if((wbmp = createwbmp(gdImageSX(image), gdImageSY(image), WBMP_WHITE)) == NULL) {
gd_error("Could not create WBMP\n");
return;
}
pos = 0;
for(y = 0; y < gdImageSY(image); y++) {
for(x = 0; x < gdImageSX(image); x++) {
if(gdImageGetPixel(image, x, y) == fg) {
wbmp->bitmap[pos] = WBMP_BLACK;
}
pos++;
}
}
if(writewbmp(wbmp, &gd_putout, out)) {
gd_error("Could not save WBMP\n");
}
freewbmp(wbmp);
} | 1,328 |
1 | static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg,
unsigned int off)
{
/* Do not send cong updates to loopback */
if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
}
BUG_ON(hdr_off || sg || off);
rds_inc_init(&rm->m_inc, conn, conn->c_laddr);
/* For the embedded inc. Matching put is in loop_inc_free() */
rds_message_addref(rm);
rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc,
GFP_KERNEL, KM_USER0);
rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence),
NULL);
rds_inc_put(&rm->m_inc);
return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len);
} | static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg,
unsigned int off)
{
if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
}
BUG_ON(hdr_off || sg || off);
rds_inc_init(&rm->m_inc, conn, conn->c_laddr);
rds_message_addref(rm);
rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc,
GFP_KERNEL, KM_USER0);
rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence),
NULL);
rds_inc_put(&rm->m_inc);
return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len);
} | 1,329 |
0 | char * get_language_name ( Oid langoid , bool missing_ok ) {
HeapTuple tp ;
tp = SearchSysCache1 ( LANGOID , ObjectIdGetDatum ( langoid ) ) ;
if ( HeapTupleIsValid ( tp ) ) {
Form_pg_language lantup = ( Form_pg_language ) GETSTRUCT ( tp ) ;
char * result ;
result = pstrdup ( NameStr ( lantup -> lanname ) ) ;
ReleaseSysCache ( tp ) ;
return result ;
}
if ( ! missing_ok ) elog ( ERROR , "cache lookup failed for language %u" , langoid ) ;
return NULL ;
} | char * get_language_name ( Oid langoid , bool missing_ok ) {
HeapTuple tp ;
tp = SearchSysCache1 ( LANGOID , ObjectIdGetDatum ( langoid ) ) ;
if ( HeapTupleIsValid ( tp ) ) {
Form_pg_language lantup = ( Form_pg_language ) GETSTRUCT ( tp ) ;
char * result ;
result = pstrdup ( NameStr ( lantup -> lanname ) ) ;
ReleaseSysCache ( tp ) ;
return result ;
}
if ( ! missing_ok ) elog ( ERROR , "cache lookup failed for language %u" , langoid ) ;
return NULL ;
} | 1,330 |
1 | BGD_DECLARE(void *) gdImageWBMPPtr(gdImagePtr im, int *size, int fg)
{
void *rv;
gdIOCtx *out = gdNewDynamicCtx(2048, NULL);
if (out == NULL) return NULL;
gdImageWBMPCtx(im, fg, out);
rv = gdDPExtractData(out, size);
out->gd_free(out);
return rv;
} | BGD_DECLARE(void *) gdImageWBMPPtr(gdImagePtr im, int *size, int fg)
{
void *rv;
gdIOCtx *out = gdNewDynamicCtx(2048, NULL);
if (out == NULL) return NULL;
gdImageWBMPCtx(im, fg, out);
rv = gdDPExtractData(out, size);
out->gd_free(out);
return rv;
} | 1,333 |
1 | static krb5_error_code build_principal_va ( krb5_context context , krb5_principal princ , unsigned int rlen , const char * realm , va_list ap ) {
krb5_error_code retval = 0 ;
char * r = NULL ;
krb5_data * data = NULL ;
krb5_int32 count = 0 ;
krb5_int32 size = 2 ;
char * component = NULL ;
data = malloc ( size * sizeof ( krb5_data ) ) ;
if ( ! data ) {
retval = ENOMEM ;
}
if ( ! retval ) {
r = strdup ( realm ) ;
if ( ! r ) {
retval = ENOMEM ;
}
}
while ( ! retval && ( component = va_arg ( ap , char * ) ) ) {
if ( count == size ) {
krb5_data * new_data = NULL ;
size *= 2 ;
new_data = realloc ( data , size * sizeof ( krb5_data ) ) ;
if ( new_data ) {
data = new_data ;
}
else {
retval = ENOMEM ;
}
}
if ( ! retval ) {
data [ count ] . length = strlen ( component ) ;
data [ count ] . data = strdup ( component ) ;
if ( ! data [ count ] . data ) {
retval = ENOMEM ;
}
count ++ ;
}
}
if ( ! retval ) {
princ -> type = KRB5_NT_UNKNOWN ;
princ -> magic = KV5M_PRINCIPAL ;
princ -> realm = make_data ( r , rlen ) ;
princ -> data = data ;
princ -> length = count ;
r = NULL ;
data = NULL ;
}
if ( data ) {
while ( -- count >= 0 ) {
free ( data [ count ] . data ) ;
}
free ( data ) ;
}
free ( r ) ;
return retval ;
} | static krb5_error_code build_principal_va ( krb5_context context , krb5_principal princ , unsigned int rlen , const char * realm , va_list ap ) {
krb5_error_code retval = 0 ;
char * r = NULL ;
krb5_data * data = NULL ;
krb5_int32 count = 0 ;
krb5_int32 size = 2 ;
char * component = NULL ;
data = malloc ( size * sizeof ( krb5_data ) ) ;
if ( ! data ) {
retval = ENOMEM ;
}
if ( ! retval ) {
r = strdup ( realm ) ;
if ( ! r ) {
retval = ENOMEM ;
}
}
while ( ! retval && ( component = va_arg ( ap , char * ) ) ) {
if ( count == size ) {
krb5_data * new_data = NULL ;
size *= 2 ;
new_data = realloc ( data , size * sizeof ( krb5_data ) ) ;
if ( new_data ) {
data = new_data ;
}
else {
retval = ENOMEM ;
}
}
if ( ! retval ) {
data [ count ] . length = strlen ( component ) ;
data [ count ] . data = strdup ( component ) ;
if ( ! data [ count ] . data ) {
retval = ENOMEM ;
}
count ++ ;
}
}
if ( ! retval ) {
princ -> type = KRB5_NT_UNKNOWN ;
princ -> magic = KV5M_PRINCIPAL ;
princ -> realm = make_data ( r , rlen ) ;
princ -> data = data ;
princ -> length = count ;
r = NULL ;
data = NULL ;
}
if ( data ) {
while ( -- count >= 0 ) {
free ( data [ count ] . data ) ;
}
free ( data ) ;
}
free ( r ) ;
return retval ;
} | 1,334 |
1 | struct udev_device *udev_monitor_receive_device(struct udev_monitor *udev_monitor)
{
struct udev_device *udev_device;
struct msghdr smsg;
struct iovec iov;
char cred_msg[CMSG_SPACE(sizeof(struct ucred))];
char buf[4096];
size_t bufpos;
int devpath_set = 0;
int subsystem_set = 0;
int action_set = 0;
int maj = 0;
int min = 0;
if (udev_monitor == NULL)
return NULL;
memset(buf, 0x00, sizeof(buf));
iov.iov_base = &buf;
iov.iov_len = sizeof(buf);
memset (&smsg, 0x00, sizeof(struct msghdr));
smsg.msg_iov = &iov;
smsg.msg_iovlen = 1;
smsg.msg_control = cred_msg;
smsg.msg_controllen = sizeof(cred_msg);
if (recvmsg(udev_monitor->sock, &smsg, 0) < 0) {
if (errno != EINTR)
info(udev_monitor->udev, "unable to receive message");
return NULL;
}
if (udev_monitor->sun.sun_family != 0) {
struct cmsghdr *cmsg = CMSG_FIRSTHDR(&smsg);
struct ucred *cred = (struct ucred *)CMSG_DATA (cmsg);
if (cmsg == NULL || cmsg->cmsg_type != SCM_CREDENTIALS) {
info(udev_monitor->udev, "no sender credentials received, message ignored");
return NULL;
}
if (cred->uid != 0) {
info(udev_monitor->udev, "sender uid=%d, message ignored", cred->uid);
return NULL;
}
}
/* skip header */
bufpos = strlen(buf) + 1;
if (bufpos < sizeof("a@/d") || bufpos >= sizeof(buf)) {
info(udev_monitor->udev, "invalid message length");
return NULL;
}
/* check message header */
if (strstr(buf, "@/") == NULL) {
info(udev_monitor->udev, "unrecognized message header");
return NULL;
}
udev_device = device_new(udev_monitor->udev);
if (udev_device == NULL) {
return NULL;
}
while (bufpos < sizeof(buf)) {
char *key;
size_t keylen;
key = &buf[bufpos];
keylen = strlen(key);
if (keylen == 0)
break;
bufpos += keylen + 1;
if (strncmp(key, "DEVPATH=", 8) == 0) {
char path[UTIL_PATH_SIZE];
util_strlcpy(path, udev_get_sys_path(udev_monitor->udev), sizeof(path));
util_strlcat(path, &key[8], sizeof(path));
udev_device_set_syspath(udev_device, path);
devpath_set = 1;
} else if (strncmp(key, "SUBSYSTEM=", 10) == 0) {
udev_device_set_subsystem(udev_device, &key[10]);
subsystem_set = 1;
} else if (strncmp(key, "DEVTYPE=", 8) == 0) {
udev_device_set_devtype(udev_device, &key[8]);
} else if (strncmp(key, "DEVNAME=", 8) == 0) {
udev_device_set_devnode(udev_device, &key[8]);
} else if (strncmp(key, "DEVLINKS=", 9) == 0) {
char devlinks[UTIL_PATH_SIZE];
char *slink;
char *next;
util_strlcpy(devlinks, &key[9], sizeof(devlinks));
slink = devlinks;
next = strchr(slink, ' ');
while (next != NULL) {
next[0] = '\0';
udev_device_add_devlink(udev_device, slink);
slink = &next[1];
next = strchr(slink, ' ');
}
if (slink[0] != '\0')
udev_device_add_devlink(udev_device, slink);
} else if (strncmp(key, "DRIVER=", 7) == 0) {
udev_device_set_driver(udev_device, &key[7]);
} else if (strncmp(key, "ACTION=", 7) == 0) {
udev_device_set_action(udev_device, &key[7]);
action_set = 1;
} else if (strncmp(key, "MAJOR=", 6) == 0) {
maj = strtoull(&key[6], NULL, 10);
} else if (strncmp(key, "MINOR=", 6) == 0) {
min = strtoull(&key[6], NULL, 10);
} else if (strncmp(key, "DEVPATH_OLD=", 12) == 0) {
udev_device_set_devpath_old(udev_device, &key[12]);
} else if (strncmp(key, "PHYSDEVPATH=", 12) == 0) {
udev_device_set_physdevpath(udev_device, &key[12]);
} else if (strncmp(key, "SEQNUM=", 7) == 0) {
udev_device_set_seqnum(udev_device, strtoull(&key[7], NULL, 10));
} else if (strncmp(key, "TIMEOUT=", 8) == 0) {
udev_device_set_timeout(udev_device, strtoull(&key[8], NULL, 10));
} else if (strncmp(key, "PHYSDEV", 7) == 0) {
/* skip deprecated values */
continue;
} else {
udev_device_add_property_from_string(udev_device, key);
}
}
if (!devpath_set || !subsystem_set || !action_set) {
info(udev_monitor->udev, "missing values, skip\n");
udev_device_unref(udev_device);
return NULL;
}
if (maj > 0)
udev_device_set_devnum(udev_device, makedev(maj, min));
udev_device_set_info_loaded(udev_device);
return udev_device;
} | struct udev_device *udev_monitor_receive_device(struct udev_monitor *udev_monitor)
{
struct udev_device *udev_device;
struct msghdr smsg;
struct iovec iov;
char cred_msg[CMSG_SPACE(sizeof(struct ucred))];
char buf[4096];
size_t bufpos;
int devpath_set = 0;
int subsystem_set = 0;
int action_set = 0;
int maj = 0;
int min = 0;
if (udev_monitor == NULL)
return NULL;
memset(buf, 0x00, sizeof(buf));
iov.iov_base = &buf;
iov.iov_len = sizeof(buf);
memset (&smsg, 0x00, sizeof(struct msghdr));
smsg.msg_iov = &iov;
smsg.msg_iovlen = 1;
smsg.msg_control = cred_msg;
smsg.msg_controllen = sizeof(cred_msg);
if (recvmsg(udev_monitor->sock, &smsg, 0) < 0) {
if (errno != EINTR)
info(udev_monitor->udev, "unable to receive message");
return NULL;
}
if (udev_monitor->sun.sun_family != 0) {
struct cmsghdr *cmsg = CMSG_FIRSTHDR(&smsg);
struct ucred *cred = (struct ucred *)CMSG_DATA (cmsg);
if (cmsg == NULL || cmsg->cmsg_type != SCM_CREDENTIALS) {
info(udev_monitor->udev, "no sender credentials received, message ignored");
return NULL;
}
if (cred->uid != 0) {
info(udev_monitor->udev, "sender uid=%d, message ignored", cred->uid);
return NULL;
}
}
bufpos = strlen(buf) + 1;
if (bufpos < sizeof("a@/d") || bufpos >= sizeof(buf)) {
info(udev_monitor->udev, "invalid message length");
return NULL;
}
if (strstr(buf, "@/") == NULL) {
info(udev_monitor->udev, "unrecognized message header");
return NULL;
}
udev_device = device_new(udev_monitor->udev);
if (udev_device == NULL) {
return NULL;
}
while (bufpos < sizeof(buf)) {
char *key;
size_t keylen;
key = &buf[bufpos];
keylen = strlen(key);
if (keylen == 0)
break;
bufpos += keylen + 1;
if (strncmp(key, "DEVPATH=", 8) == 0) {
char path[UTIL_PATH_SIZE];
util_strlcpy(path, udev_get_sys_path(udev_monitor->udev), sizeof(path));
util_strlcat(path, &key[8], sizeof(path));
udev_device_set_syspath(udev_device, path);
devpath_set = 1;
} else if (strncmp(key, "SUBSYSTEM=", 10) == 0) {
udev_device_set_subsystem(udev_device, &key[10]);
subsystem_set = 1;
} else if (strncmp(key, "DEVTYPE=", 8) == 0) {
udev_device_set_devtype(udev_device, &key[8]);
} else if (strncmp(key, "DEVNAME=", 8) == 0) {
udev_device_set_devnode(udev_device, &key[8]);
} else if (strncmp(key, "DEVLINKS=", 9) == 0) {
char devlinks[UTIL_PATH_SIZE];
char *slink;
char *next;
util_strlcpy(devlinks, &key[9], sizeof(devlinks));
slink = devlinks;
next = strchr(slink, ' ');
while (next != NULL) {
next[0] = '\0';
udev_device_add_devlink(udev_device, slink);
slink = &next[1];
next = strchr(slink, ' ');
}
if (slink[0] != '\0')
udev_device_add_devlink(udev_device, slink);
} else if (strncmp(key, "DRIVER=", 7) == 0) {
udev_device_set_driver(udev_device, &key[7]);
} else if (strncmp(key, "ACTION=", 7) == 0) {
udev_device_set_action(udev_device, &key[7]);
action_set = 1;
} else if (strncmp(key, "MAJOR=", 6) == 0) {
maj = strtoull(&key[6], NULL, 10);
} else if (strncmp(key, "MINOR=", 6) == 0) {
min = strtoull(&key[6], NULL, 10);
} else if (strncmp(key, "DEVPATH_OLD=", 12) == 0) {
udev_device_set_devpath_old(udev_device, &key[12]);
} else if (strncmp(key, "PHYSDEVPATH=", 12) == 0) {
udev_device_set_physdevpath(udev_device, &key[12]);
} else if (strncmp(key, "SEQNUM=", 7) == 0) {
udev_device_set_seqnum(udev_device, strtoull(&key[7], NULL, 10));
} else if (strncmp(key, "TIMEOUT=", 8) == 0) {
udev_device_set_timeout(udev_device, strtoull(&key[8], NULL, 10));
} else if (strncmp(key, "PHYSDEV", 7) == 0) {
continue;
} else {
udev_device_add_property_from_string(udev_device, key);
}
}
if (!devpath_set || !subsystem_set || !action_set) {
info(udev_monitor->udev, "missing values, skip\n");
udev_device_unref(udev_device);
return NULL;
}
if (maj > 0)
udev_device_set_devnum(udev_device, makedev(maj, min));
udev_device_set_info_loaded(udev_device);
return udev_device;
} | 1,335 |
0 | BGD_DECLARE(void *) gdImageWBMPPtr(gdImagePtr im, int *size, int fg)
{
void *rv;
gdIOCtx *out = gdNewDynamicCtx(2048, NULL);
if (out == NULL) return NULL;
if (!_gdImageWBMPCtx(im, fg, out)) {
rv = gdDPExtractData(out, size);
} else {
rv = NULL;
}
out->gd_free(out);
return rv;
} | BGD_DECLARE(void *) gdImageWBMPPtr(gdImagePtr im, int *size, int fg)
{
void *rv;
gdIOCtx *out = gdNewDynamicCtx(2048, NULL);
if (out == NULL) return NULL;
if (!_gdImageWBMPCtx(im, fg, out)) {
rv = gdDPExtractData(out, size);
} else {
rv = NULL;
}
out->gd_free(out);
return rv;
} | 1,338 |
0 | static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce) { /* * Check if @iova is above 2^X-1, where X is the minimum of MGAW * in CAP_REG and AW in context-entry. */ return !(iova & ~(vtd_iova_limit(ce) - 1)); } | static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce) { return !(iova & ~(vtd_iova_limit(ce) - 1)); } | 1,339 |
0 | static void nautilus_directory_invalidate_file_attributes ( NautilusDirectory * directory , NautilusFileAttributes file_attributes ) {
GList * node ;
cancel_loading_attributes ( directory , file_attributes ) ;
for ( node = directory -> details -> file_list ;
node != NULL ;
node = node -> next ) {
nautilus_file_invalidate_attributes_internal ( NAUTILUS_FILE ( node -> data ) , file_attributes ) ;
}
if ( directory -> details -> as_file != NULL ) {
nautilus_file_invalidate_attributes_internal ( directory -> details -> as_file , file_attributes ) ;
}
} | static void nautilus_directory_invalidate_file_attributes ( NautilusDirectory * directory , NautilusFileAttributes file_attributes ) {
GList * node ;
cancel_loading_attributes ( directory , file_attributes ) ;
for ( node = directory -> details -> file_list ;
node != NULL ;
node = node -> next ) {
nautilus_file_invalidate_attributes_internal ( NAUTILUS_FILE ( node -> data ) , file_attributes ) ;
}
if ( directory -> details -> as_file != NULL ) {
nautilus_file_invalidate_attributes_internal ( directory -> details -> as_file , file_attributes ) ;
}
} | 1,340 |
0 | DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) { const char *value; BlockBackend *blk; DriveInfo *dinfo = NULL; QDict *bs_opts; QemuOpts *legacy_opts; DriveMediaType media = MEDIA_DISK; BlockInterfaceType type; int cyls, heads, secs, translation; int max_devs, bus_id, unit_id, index; const char *devaddr; const char *werror, *rerror; bool read_only = false; bool copy_on_read; const char *serial; const char *filename; Error *local_err = NULL; int i; const char *deprecated[] = { "serial", "trans", "secs", "heads", "cyls", "addr" }; /* Change legacy command line options into QMP ones */ static const struct { const char *from; const char *to; } opt_renames[] = { { "iops", "throttling.iops-total" }, { "iops_rd", "throttling.iops-read" }, { "iops_wr", "throttling.iops-write" }, { "bps", "throttling.bps-total" }, { "bps_rd", "throttling.bps-read" }, { "bps_wr", "throttling.bps-write" }, { "iops_max", "throttling.iops-total-max" }, { "iops_rd_max", "throttling.iops-read-max" }, { "iops_wr_max", "throttling.iops-write-max" }, { "bps_max", "throttling.bps-total-max" }, { "bps_rd_max", "throttling.bps-read-max" }, { "bps_wr_max", "throttling.bps-write-max" }, { "iops_size", "throttling.iops-size" }, { "group", "throttling.group" }, { "readonly", BDRV_OPT_READ_ONLY }, }; for (i = 0; i < ARRAY_SIZE(opt_renames); i++) { qemu_opt_rename(all_opts, opt_renames[i].from, opt_renames[i].to, &local_err); if (local_err) { error_report_err(local_err); return NULL; } } value = qemu_opt_get(all_opts, "cache"); if (value) { int flags = 0; bool writethrough; if (bdrv_parse_cache_mode(value, &flags, &writethrough) != 0) { error_report("invalid cache option"); return NULL; } /* Specific options take precedence */ if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_WB)) { qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_WB, !writethrough, &error_abort); } if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_DIRECT)) { qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_DIRECT, !!(flags & BDRV_O_NOCACHE), &error_abort); } if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_NO_FLUSH)) { qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_NO_FLUSH, !!(flags & BDRV_O_NO_FLUSH), &error_abort); } qemu_opt_unset(all_opts, "cache"); } /* Get a QDict for processing the options */ bs_opts = qdict_new(); qemu_opts_to_qdict(all_opts, bs_opts); legacy_opts = qemu_opts_create(&qemu_legacy_drive_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(legacy_opts, bs_opts, &local_err); if (local_err) { error_report_err(local_err); goto fail; } /* Deprecated option boot=[on|off] */ if (qemu_opt_get(legacy_opts, "boot") != NULL) { fprintf(stderr, "qemu-kvm: boot=on|off is deprecated and will be " "ignored. Future versions will reject this parameter. 
Please " "update your scripts.\n"); } /* Other deprecated options */ if (!qtest_enabled()) { for (i = 0; i < ARRAY_SIZE(deprecated); i++) { if (qemu_opt_get(legacy_opts, deprecated[i]) != NULL) { error_report("'%s' is deprecated, please use the corresponding " "option of '-device' instead", deprecated[i]); } } } /* Media type */ value = qemu_opt_get(legacy_opts, "media"); if (value) { if (!strcmp(value, "disk")) { media = MEDIA_DISK; } else if (!strcmp(value, "cdrom")) { media = MEDIA_CDROM; read_only = true; } else { error_report("'%s' invalid media", value); goto fail; } } /* copy-on-read is disabled with a warning for read-only devices */ read_only |= qemu_opt_get_bool(legacy_opts, BDRV_OPT_READ_ONLY, false); copy_on_read = qemu_opt_get_bool(legacy_opts, "copy-on-read", false); if (read_only && copy_on_read) { error_report("warning: disabling copy-on-read on read-only drive"); copy_on_read = false; } qdict_put_str(bs_opts, BDRV_OPT_READ_ONLY, read_only ? "on" : "off"); qdict_put_str(bs_opts, "copy-on-read", copy_on_read ? "on" : "off"); /* Controller type */ value = qemu_opt_get(legacy_opts, "if"); if (value) { for (type = 0; type < IF_COUNT && strcmp(value, if_name[type]); type++) { } if (type == IF_COUNT) { error_report("unsupported bus type '%s'", value); goto fail; } } else { type = block_default_type; } /* Geometry */ cyls = qemu_opt_get_number(legacy_opts, "cyls", 0); heads = qemu_opt_get_number(legacy_opts, "heads", 0); secs = qemu_opt_get_number(legacy_opts, "secs", 0); if (cyls || heads || secs) { if (cyls < 1) { error_report("invalid physical cyls number"); goto fail; } if (heads < 1) { error_report("invalid physical heads number"); goto fail; } if (secs < 1) { error_report("invalid physical secs number"); goto fail; } } translation = BIOS_ATA_TRANSLATION_AUTO; value = qemu_opt_get(legacy_opts, "trans"); if (value != NULL) { if (!cyls) { error_report("'%s' trans must be used with cyls, heads and secs", value); goto fail; } if (!strcmp(value, "none")) { translation = BIOS_ATA_TRANSLATION_NONE; } else if (!strcmp(value, "lba")) { translation = BIOS_ATA_TRANSLATION_LBA; } else if (!strcmp(value, "large")) { translation = BIOS_ATA_TRANSLATION_LARGE; } else if (!strcmp(value, "rechs")) { translation = BIOS_ATA_TRANSLATION_RECHS; } else if (!strcmp(value, "auto")) { translation = BIOS_ATA_TRANSLATION_AUTO; } else { error_report("'%s' invalid translation type", value); goto fail; } } if (media == MEDIA_CDROM) { if (cyls || secs || heads) { error_report("CHS can't be set with media=cdrom"); goto fail; } } /* Device address specified by bus/unit or index. * If none was specified, try to find the first free one. 
*/ bus_id = qemu_opt_get_number(legacy_opts, "bus", 0); unit_id = qemu_opt_get_number(legacy_opts, "unit", -1); index = qemu_opt_get_number(legacy_opts, "index", -1); max_devs = if_max_devs[type]; if (index != -1) { if (bus_id != 0 || unit_id != -1) { error_report("index cannot be used with bus and unit"); goto fail; } bus_id = drive_index_to_bus_id(type, index); unit_id = drive_index_to_unit_id(type, index); } if (unit_id == -1) { unit_id = 0; while (drive_get(type, bus_id, unit_id) != NULL) { unit_id++; if (max_devs && unit_id >= max_devs) { unit_id -= max_devs; bus_id++; } } } if (max_devs && unit_id >= max_devs) { error_report("unit %d too big (max is %d)", unit_id, max_devs - 1); goto fail; } if (drive_get(type, bus_id, unit_id) != NULL) { error_report("drive with bus=%d, unit=%d (index=%d) exists", bus_id, unit_id, index); goto fail; } /* Serial number */ serial = qemu_opt_get(legacy_opts, "serial"); /* no id supplied -> create one */ if (qemu_opts_id(all_opts) == NULL) { char *new_id; const char *mediastr = ""; if (type == IF_IDE || type == IF_SCSI) { mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd"; } if (max_devs) { new_id = g_strdup_printf("%s%i%s%i", if_name[type], bus_id, mediastr, unit_id); } else { new_id = g_strdup_printf("%s%s%i", if_name[type], mediastr, unit_id); } qdict_put_str(bs_opts, "id", new_id); g_free(new_id); } /* Add virtio block device */ devaddr = qemu_opt_get(legacy_opts, "addr"); if (devaddr && type != IF_VIRTIO) { error_report("addr is not supported by this bus type"); goto fail; } if (type == IF_VIRTIO) { QemuOpts *devopts; devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0, &error_abort); if (arch_type == QEMU_ARCH_S390X) { qemu_opt_set(devopts, "driver", "virtio-blk-ccw", &error_abort); } else { qemu_opt_set(devopts, "driver", "virtio-blk-pci", &error_abort); } qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"), &error_abort); if (devaddr) { qemu_opt_set(devopts, "addr", devaddr, &error_abort); } } filename = qemu_opt_get(legacy_opts, "file"); /* Check werror/rerror compatibility with if=... 
*/ werror = qemu_opt_get(legacy_opts, "werror"); if (werror != NULL) { if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO && type != IF_NONE) { error_report("werror is not supported by this bus type"); goto fail; } qdict_put_str(bs_opts, "werror", werror); } rerror = qemu_opt_get(legacy_opts, "rerror"); if (rerror != NULL) { if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI && type != IF_NONE) { error_report("rerror is not supported by this bus type"); goto fail; } qdict_put_str(bs_opts, "rerror", rerror); } /* Actual block device init: Functionality shared with blockdev-add */ blk = blockdev_init(filename, bs_opts, &local_err); bs_opts = NULL; if (!blk) { if (local_err) { error_report_err(local_err); } goto fail; } else { assert(!local_err); } /* Create legacy DriveInfo */ dinfo = g_malloc0(sizeof(*dinfo)); dinfo->opts = all_opts; dinfo->cyls = cyls; dinfo->heads = heads; dinfo->secs = secs; dinfo->trans = translation; dinfo->type = type; dinfo->bus = bus_id; dinfo->unit = unit_id; dinfo->devaddr = devaddr; dinfo->serial = g_strdup(serial); blk_set_legacy_dinfo(blk, dinfo); switch(type) { case IF_IDE: case IF_SCSI: case IF_XEN: case IF_NONE: dinfo->media_cd = media == MEDIA_CDROM; break; default: break; } fail: qemu_opts_del(legacy_opts); QDECREF(bs_opts); return dinfo; } | DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) { const char *value; BlockBackend *blk; DriveInfo *dinfo = NULL; QDict *bs_opts; QemuOpts *legacy_opts; DriveMediaType media = MEDIA_DISK; BlockInterfaceType type; int cyls, heads, secs, translation; int max_devs, bus_id, unit_id, index; const char *devaddr; const char *werror, *rerror; bool read_only = false; bool copy_on_read; const char *serial; const char *filename; Error *local_err = NULL; int i; const char *deprecated[] = { "serial", "trans", "secs", "heads", "cyls", "addr" }; static const struct { const char *from; const char *to; } opt_renames[] = { { "iops", "throttling.iops-total" }, { "iops_rd", "throttling.iops-read" }, { "iops_wr", "throttling.iops-write" }, { "bps", "throttling.bps-total" }, { "bps_rd", "throttling.bps-read" }, { "bps_wr", "throttling.bps-write" }, { "iops_max", "throttling.iops-total-max" }, { "iops_rd_max", "throttling.iops-read-max" }, { "iops_wr_max", "throttling.iops-write-max" }, { "bps_max", "throttling.bps-total-max" }, { "bps_rd_max", "throttling.bps-read-max" }, { "bps_wr_max", "throttling.bps-write-max" }, { "iops_size", "throttling.iops-size" }, { "group", "throttling.group" }, { "readonly", BDRV_OPT_READ_ONLY }, }; for (i = 0; i < ARRAY_SIZE(opt_renames); i++) { qemu_opt_rename(all_opts, opt_renames[i].from, opt_renames[i].to, &local_err); if (local_err) { error_report_err(local_err); return NULL; } } value = qemu_opt_get(all_opts, "cache"); if (value) { int flags = 0; bool writethrough; if (bdrv_parse_cache_mode(value, &flags, &writethrough) != 0) { error_report("invalid cache option"); return NULL; } if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_WB)) { qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_WB, !writethrough, &error_abort); } if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_DIRECT)) { qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_DIRECT, !!(flags & BDRV_O_NOCACHE), &error_abort); } if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_NO_FLUSH)) { qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_NO_FLUSH, !!(flags & BDRV_O_NO_FLUSH), &error_abort); } qemu_opt_unset(all_opts, "cache"); } bs_opts = qdict_new(); qemu_opts_to_qdict(all_opts, bs_opts); legacy_opts = qemu_opts_create(&qemu_legacy_drive_opts, 
NULL, 0, &error_abort); qemu_opts_absorb_qdict(legacy_opts, bs_opts, &local_err); if (local_err) { error_report_err(local_err); goto fail; } if (qemu_opt_get(legacy_opts, "boot") != NULL) { fprintf(stderr, "qemu-kvm: boot=on|off is deprecated and will be " "ignored. Future versions will reject this parameter. Please " "update your scripts.\n"); } if (!qtest_enabled()) { for (i = 0; i < ARRAY_SIZE(deprecated); i++) { if (qemu_opt_get(legacy_opts, deprecated[i]) != NULL) { error_report("'%s' is deprecated, please use the corresponding " "option of '-device' instead", deprecated[i]); } } } value = qemu_opt_get(legacy_opts, "media"); if (value) { if (!strcmp(value, "disk")) { media = MEDIA_DISK; } else if (!strcmp(value, "cdrom")) { media = MEDIA_CDROM; read_only = true; } else { error_report("'%s' invalid media", value); goto fail; } } read_only |= qemu_opt_get_bool(legacy_opts, BDRV_OPT_READ_ONLY, false); copy_on_read = qemu_opt_get_bool(legacy_opts, "copy-on-read", false); if (read_only && copy_on_read) { error_report("warning: disabling copy-on-read on read-only drive"); copy_on_read = false; } qdict_put_str(bs_opts, BDRV_OPT_READ_ONLY, read_only ? "on" : "off"); qdict_put_str(bs_opts, "copy-on-read", copy_on_read ? "on" : "off"); value = qemu_opt_get(legacy_opts, "if"); if (value) { for (type = 0; type < IF_COUNT && strcmp(value, if_name[type]); type++) { } if (type == IF_COUNT) { error_report("unsupported bus type '%s'", value); goto fail; } } else { type = block_default_type; } cyls = qemu_opt_get_number(legacy_opts, "cyls", 0); heads = qemu_opt_get_number(legacy_opts, "heads", 0); secs = qemu_opt_get_number(legacy_opts, "secs", 0); if (cyls || heads || secs) { if (cyls < 1) { error_report("invalid physical cyls number"); goto fail; } if (heads < 1) { error_report("invalid physical heads number"); goto fail; } if (secs < 1) { error_report("invalid physical secs number"); goto fail; } } translation = BIOS_ATA_TRANSLATION_AUTO; value = qemu_opt_get(legacy_opts, "trans"); if (value != NULL) { if (!cyls) { error_report("'%s' trans must be used with cyls, heads and secs", value); goto fail; } if (!strcmp(value, "none")) { translation = BIOS_ATA_TRANSLATION_NONE; } else if (!strcmp(value, "lba")) { translation = BIOS_ATA_TRANSLATION_LBA; } else if (!strcmp(value, "large")) { translation = BIOS_ATA_TRANSLATION_LARGE; } else if (!strcmp(value, "rechs")) { translation = BIOS_ATA_TRANSLATION_RECHS; } else if (!strcmp(value, "auto")) { translation = BIOS_ATA_TRANSLATION_AUTO; } else { error_report("'%s' invalid translation type", value); goto fail; } } if (media == MEDIA_CDROM) { if (cyls || secs || heads) { error_report("CHS can't be set with media=cdrom"); goto fail; } } bus_id = qemu_opt_get_number(legacy_opts, "bus", 0); unit_id = qemu_opt_get_number(legacy_opts, "unit", -1); index = qemu_opt_get_number(legacy_opts, "index", -1); max_devs = if_max_devs[type]; if (index != -1) { if (bus_id != 0 || unit_id != -1) { error_report("index cannot be used with bus and unit"); goto fail; } bus_id = drive_index_to_bus_id(type, index); unit_id = drive_index_to_unit_id(type, index); } if (unit_id == -1) { unit_id = 0; while (drive_get(type, bus_id, unit_id) != NULL) { unit_id++; if (max_devs && unit_id >= max_devs) { unit_id -= max_devs; bus_id++; } } } if (max_devs && unit_id >= max_devs) { error_report("unit %d too big (max is %d)", unit_id, max_devs - 1); goto fail; } if (drive_get(type, bus_id, unit_id) != NULL) { error_report("drive with bus=%d, unit=%d (index=%d) exists", bus_id, unit_id, index); 
goto fail; } serial = qemu_opt_get(legacy_opts, "serial"); if (qemu_opts_id(all_opts) == NULL) { char *new_id; const char *mediastr = ""; if (type == IF_IDE || type == IF_SCSI) { mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd"; } if (max_devs) { new_id = g_strdup_printf("%s%i%s%i", if_name[type], bus_id, mediastr, unit_id); } else { new_id = g_strdup_printf("%s%s%i", if_name[type], mediastr, unit_id); } qdict_put_str(bs_opts, "id", new_id); g_free(new_id); } devaddr = qemu_opt_get(legacy_opts, "addr"); if (devaddr && type != IF_VIRTIO) { error_report("addr is not supported by this bus type"); goto fail; } if (type == IF_VIRTIO) { QemuOpts *devopts; devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0, &error_abort); if (arch_type == QEMU_ARCH_S390X) { qemu_opt_set(devopts, "driver", "virtio-blk-ccw", &error_abort); } else { qemu_opt_set(devopts, "driver", "virtio-blk-pci", &error_abort); } qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"), &error_abort); if (devaddr) { qemu_opt_set(devopts, "addr", devaddr, &error_abort); } } filename = qemu_opt_get(legacy_opts, "file"); werror = qemu_opt_get(legacy_opts, "werror"); if (werror != NULL) { if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO && type != IF_NONE) { error_report("werror is not supported by this bus type"); goto fail; } qdict_put_str(bs_opts, "werror", werror); } rerror = qemu_opt_get(legacy_opts, "rerror"); if (rerror != NULL) { if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI && type != IF_NONE) { error_report("rerror is not supported by this bus type"); goto fail; } qdict_put_str(bs_opts, "rerror", rerror); } blk = blockdev_init(filename, bs_opts, &local_err); bs_opts = NULL; if (!blk) { if (local_err) { error_report_err(local_err); } goto fail; } else { assert(!local_err); } dinfo = g_malloc0(sizeof(*dinfo)); dinfo->opts = all_opts; dinfo->cyls = cyls; dinfo->heads = heads; dinfo->secs = secs; dinfo->trans = translation; dinfo->type = type; dinfo->bus = bus_id; dinfo->unit = unit_id; dinfo->devaddr = devaddr; dinfo->serial = g_strdup(serial); blk_set_legacy_dinfo(blk, dinfo); switch(type) { case IF_IDE: case IF_SCSI: case IF_XEN: case IF_NONE: dinfo->media_cd = media == MEDIA_CDROM; break; default: break; } fail: qemu_opts_del(legacy_opts); QDECREF(bs_opts); return dinfo; } | 1,341 |
0 | int qemuAssignDeviceRNGAlias ( virDomainDefPtr def , virDomainRNGDefPtr rng ) {
size_t i ;
int maxidx = 0 ;
int idx ;
for ( i = 0 ;
i < def -> nrngs ;
i ++ ) {
if ( ( idx = qemuDomainDeviceAliasIndex ( & def -> rngs [ i ] -> info , "rng" ) ) >= maxidx ) maxidx = idx + 1 ;
}
if ( virAsprintf ( & rng -> info . alias , "rng%d" , maxidx ) < 0 ) return - 1 ;
return 0 ;
} | int qemuAssignDeviceRNGAlias ( virDomainDefPtr def , virDomainRNGDefPtr rng ) {
size_t i ;
int maxidx = 0 ;
int idx ;
for ( i = 0 ;
i < def -> nrngs ;
i ++ ) {
if ( ( idx = qemuDomainDeviceAliasIndex ( & def -> rngs [ i ] -> info , "rng" ) ) >= maxidx ) maxidx = idx + 1 ;
}
if ( virAsprintf ( & rng -> info . alias , "rng%d" , maxidx ) < 0 ) return - 1 ;
return 0 ;
} | 1,342 |
1 | void dev_load(struct net *net, const char *name)
{
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_name_rcu(net, name);
rcu_read_unlock();
if (!dev && capable(CAP_NET_ADMIN))
request_module("%s", name);
} | void dev_load(struct net *net, const char *name)
{
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_name_rcu(net, name);
rcu_read_unlock();
if (!dev && capable(CAP_NET_ADMIN))
request_module("%s", name);
} | 1,343 |
1 | struct udev_device *udev_monitor_receive_device(struct udev_monitor *udev_monitor)
{
struct udev_device *udev_device;
struct msghdr smsg;
struct iovec iov;
char cred_msg[CMSG_SPACE(sizeof(struct ucred))];
struct cmsghdr *cmsg;
struct ucred *cred;
char buf[4096];
size_t bufpos;
int devpath_set = 0;
int subsystem_set = 0;
int action_set = 0;
int maj = 0;
int min = 0;
if (udev_monitor == NULL)
return NULL;
memset(buf, 0x00, sizeof(buf));
iov.iov_base = &buf;
iov.iov_len = sizeof(buf);
memset (&smsg, 0x00, sizeof(struct msghdr));
smsg.msg_iov = &iov;
smsg.msg_iovlen = 1;
smsg.msg_control = cred_msg;
smsg.msg_controllen = sizeof(cred_msg);
if (recvmsg(udev_monitor->sock, &smsg, 0) < 0) {
if (errno != EINTR)
info(udev_monitor->udev, "unable to receive message");
return NULL;
}
cmsg = CMSG_FIRSTHDR(&smsg);
if (cmsg == NULL || cmsg->cmsg_type != SCM_CREDENTIALS) {
info(udev_monitor->udev, "no sender credentials received, message ignored");
return NULL;
}
cred = (struct ucred *)CMSG_DATA(cmsg);
if (cred->uid != 0) {
info(udev_monitor->udev, "sender uid=%d, message ignored", cred->uid);
return NULL;
}
/* skip header */
bufpos = strlen(buf) + 1;
if (bufpos < sizeof("a@/d") || bufpos >= sizeof(buf)) {
info(udev_monitor->udev, "invalid message length");
return NULL;
}
/* check message header */
if (strstr(buf, "@/") == NULL) {
info(udev_monitor->udev, "unrecognized message header");
return NULL;
}
udev_device = device_new(udev_monitor->udev);
if (udev_device == NULL) {
return NULL;
}
while (bufpos < sizeof(buf)) {
char *key;
size_t keylen;
key = &buf[bufpos];
keylen = strlen(key);
if (keylen == 0)
break;
bufpos += keylen + 1;
if (strncmp(key, "DEVPATH=", 8) == 0) {
char path[UTIL_PATH_SIZE];
util_strlcpy(path, udev_get_sys_path(udev_monitor->udev), sizeof(path));
util_strlcat(path, &key[8], sizeof(path));
udev_device_set_syspath(udev_device, path);
devpath_set = 1;
} else if (strncmp(key, "SUBSYSTEM=", 10) == 0) {
udev_device_set_subsystem(udev_device, &key[10]);
subsystem_set = 1;
} else if (strncmp(key, "DEVTYPE=", 8) == 0) {
udev_device_set_devtype(udev_device, &key[8]);
} else if (strncmp(key, "DEVNAME=", 8) == 0) {
udev_device_set_devnode(udev_device, &key[8]);
} else if (strncmp(key, "DEVLINKS=", 9) == 0) {
char devlinks[UTIL_PATH_SIZE];
char *slink;
char *next;
util_strlcpy(devlinks, &key[9], sizeof(devlinks));
slink = devlinks;
next = strchr(slink, ' ');
while (next != NULL) {
next[0] = '\0';
udev_device_add_devlink(udev_device, slink);
slink = &next[1];
next = strchr(slink, ' ');
}
if (slink[0] != '\0')
udev_device_add_devlink(udev_device, slink);
} else if (strncmp(key, "DRIVER=", 7) == 0) {
udev_device_set_driver(udev_device, &key[7]);
} else if (strncmp(key, "ACTION=", 7) == 0) {
udev_device_set_action(udev_device, &key[7]);
action_set = 1;
} else if (strncmp(key, "MAJOR=", 6) == 0) {
maj = strtoull(&key[6], NULL, 10);
} else if (strncmp(key, "MINOR=", 6) == 0) {
min = strtoull(&key[6], NULL, 10);
} else if (strncmp(key, "DEVPATH_OLD=", 12) == 0) {
udev_device_set_devpath_old(udev_device, &key[12]);
} else if (strncmp(key, "PHYSDEVPATH=", 12) == 0) {
udev_device_set_physdevpath(udev_device, &key[12]);
} else if (strncmp(key, "SEQNUM=", 7) == 0) {
udev_device_set_seqnum(udev_device, strtoull(&key[7], NULL, 10));
} else if (strncmp(key, "TIMEOUT=", 8) == 0) {
udev_device_set_timeout(udev_device, strtoull(&key[8], NULL, 10));
} else if (strncmp(key, "PHYSDEV", 7) == 0) {
/* skip deprecated values */
continue;
} else {
udev_device_add_property_from_string(udev_device, key);
}
}
if (!devpath_set || !subsystem_set || !action_set) {
info(udev_monitor->udev, "missing values, skip\n");
udev_device_unref(udev_device);
return NULL;
}
if (maj > 0)
udev_device_set_devnum(udev_device, makedev(maj, min));
udev_device_set_info_loaded(udev_device);
return udev_device;
} | struct udev_device *udev_monitor_receive_device(struct udev_monitor *udev_monitor)
{
struct udev_device *udev_device;
struct msghdr smsg;
struct iovec iov;
char cred_msg[CMSG_SPACE(sizeof(struct ucred))];
struct cmsghdr *cmsg;
struct ucred *cred;
char buf[4096];
size_t bufpos;
int devpath_set = 0;
int subsystem_set = 0;
int action_set = 0;
int maj = 0;
int min = 0;
if (udev_monitor == NULL)
return NULL;
memset(buf, 0x00, sizeof(buf));
iov.iov_base = &buf;
iov.iov_len = sizeof(buf);
memset (&smsg, 0x00, sizeof(struct msghdr));
smsg.msg_iov = &iov;
smsg.msg_iovlen = 1;
smsg.msg_control = cred_msg;
smsg.msg_controllen = sizeof(cred_msg);
if (recvmsg(udev_monitor->sock, &smsg, 0) < 0) {
if (errno != EINTR)
info(udev_monitor->udev, "unable to receive message");
return NULL;
}
cmsg = CMSG_FIRSTHDR(&smsg);
if (cmsg == NULL || cmsg->cmsg_type != SCM_CREDENTIALS) {
info(udev_monitor->udev, "no sender credentials received, message ignored");
return NULL;
}
cred = (struct ucred *)CMSG_DATA(cmsg);
if (cred->uid != 0) {
info(udev_monitor->udev, "sender uid=%d, message ignored", cred->uid);
return NULL;
}
bufpos = strlen(buf) + 1;
if (bufpos < sizeof("a@/d") || bufpos >= sizeof(buf)) {
info(udev_monitor->udev, "invalid message length");
return NULL;
}
if (strstr(buf, "@/") == NULL) {
info(udev_monitor->udev, "unrecognized message header");
return NULL;
}
udev_device = device_new(udev_monitor->udev);
if (udev_device == NULL) {
return NULL;
}
while (bufpos < sizeof(buf)) {
char *key;
size_t keylen;
key = &buf[bufpos];
keylen = strlen(key);
if (keylen == 0)
break;
bufpos += keylen + 1;
if (strncmp(key, "DEVPATH=", 8) == 0) {
char path[UTIL_PATH_SIZE];
util_strlcpy(path, udev_get_sys_path(udev_monitor->udev), sizeof(path));
util_strlcat(path, &key[8], sizeof(path));
udev_device_set_syspath(udev_device, path);
devpath_set = 1;
} else if (strncmp(key, "SUBSYSTEM=", 10) == 0) {
udev_device_set_subsystem(udev_device, &key[10]);
subsystem_set = 1;
} else if (strncmp(key, "DEVTYPE=", 8) == 0) {
udev_device_set_devtype(udev_device, &key[8]);
} else if (strncmp(key, "DEVNAME=", 8) == 0) {
udev_device_set_devnode(udev_device, &key[8]);
} else if (strncmp(key, "DEVLINKS=", 9) == 0) {
char devlinks[UTIL_PATH_SIZE];
char *slink;
char *next;
util_strlcpy(devlinks, &key[9], sizeof(devlinks));
slink = devlinks;
next = strchr(slink, ' ');
while (next != NULL) {
next[0] = '\0';
udev_device_add_devlink(udev_device, slink);
slink = &next[1];
next = strchr(slink, ' ');
}
if (slink[0] != '\0')
udev_device_add_devlink(udev_device, slink);
} else if (strncmp(key, "DRIVER=", 7) == 0) {
udev_device_set_driver(udev_device, &key[7]);
} else if (strncmp(key, "ACTION=", 7) == 0) {
udev_device_set_action(udev_device, &key[7]);
action_set = 1;
} else if (strncmp(key, "MAJOR=", 6) == 0) {
maj = strtoull(&key[6], NULL, 10);
} else if (strncmp(key, "MINOR=", 6) == 0) {
min = strtoull(&key[6], NULL, 10);
} else if (strncmp(key, "DEVPATH_OLD=", 12) == 0) {
udev_device_set_devpath_old(udev_device, &key[12]);
} else if (strncmp(key, "PHYSDEVPATH=", 12) == 0) {
udev_device_set_physdevpath(udev_device, &key[12]);
} else if (strncmp(key, "SEQNUM=", 7) == 0) {
udev_device_set_seqnum(udev_device, strtoull(&key[7], NULL, 10));
} else if (strncmp(key, "TIMEOUT=", 8) == 0) {
udev_device_set_timeout(udev_device, strtoull(&key[8], NULL, 10));
} else if (strncmp(key, "PHYSDEV", 7) == 0) {
continue;
} else {
udev_device_add_property_from_string(udev_device, key);
}
}
if (!devpath_set || !subsystem_set || !action_set) {
info(udev_monitor->udev, "missing values, skip\n");
udev_device_unref(udev_device);
return NULL;
}
if (maj > 0)
udev_device_set_devnum(udev_device, makedev(maj, min));
udev_device_set_info_loaded(udev_device);
return udev_device;
} | 1,344 |
0 | static int exif_scan_JPEG_header(image_info_type *ImageInfo) {
int section, sn;
int marker = 0, last_marker = M_PSEUDO, comment_correction=1;
int ll, lh;
unsigned char *Data;
size_t fpos, size, got, itemlen;
jpeg_sof_info sof_info;
for(section=0;;section++) {
// get marker byte, swallowing possible padding
// some software does not count the length bytes of COM section
// one company doing so is very much envolved in JPEG...
// so we accept too
if (last_marker==M_COM && comment_correction) {
comment_correction = 2;
}
do {
if ((marker = ImageInfo->infile->getc()) == EOF) {
raise_warning("File structure corrupted");
return 0;
}
if (last_marker==M_COM && comment_correction>0) {
if (marker!=0xFF) {
marker = 0xff;
comment_correction--;
} else {
last_marker = M_PSEUDO; /* stop skipping 0 for M_COM */
}
}
} while (marker == 0xff);
if (last_marker==M_COM && !comment_correction) {
raise_notice("Image has corrupt COM section: some software set "
"wrong length information");
}
if (last_marker==M_COM && comment_correction)
return M_EOI; /* ah illegal: char after COM section not 0xFF */
fpos = ImageInfo->infile->tell();
if (marker == 0xff) {
// 0xff is legal padding, but if we get that many, something's wrong.
raise_warning("To many padding bytes");
return 0;
}
/* Read the length of the section. */
if ((lh = ImageInfo->infile->getc()) == EOF) {
raise_warning("File structure corrupted");
return 0;
}
if ((ll = ImageInfo->infile->getc()) == EOF) {
raise_warning("File structure corrupted");
return 0;
}
itemlen = (lh << 8) | ll;
if (itemlen < 2) {
raise_warning("File structure corrupted");
return 0;
}
sn = exif_file_sections_add(ImageInfo, marker, itemlen+1, nullptr);
if (sn == -1) return 0;
Data = ImageInfo->file.list[sn].data;
/* Store first two pre-read bytes. */
Data[0] = (unsigned char)lh;
Data[1] = (unsigned char)ll;
String str = ImageInfo->infile->read(itemlen-2);
got = str.length();
if (got != itemlen-2) {
raise_warning("Error reading from file: "
"got=x%04lX(=%lu) != itemlen-2=x%04lX(=%lu)",
got, got, itemlen-2, itemlen-2);
return 0;
}
memcpy(Data+2, str.c_str(), got);
switch(marker) {
case M_SOS: /* stop before hitting compressed data */
// If reading entire image is requested, read the rest of the data.
if (ImageInfo->read_all) {
/* Determine how much file is left. */
fpos = ImageInfo->infile->tell();
size = ImageInfo->FileSize - fpos;
sn = exif_file_sections_add(ImageInfo, M_PSEUDO, size, nullptr);
if (sn == -1) return 0;
Data = ImageInfo->file.list[sn].data;
str = ImageInfo->infile->read(size);
got = str.length();
if (got != size) {
raise_warning("Unexpected end of file reached");
return 0;
}
memcpy(Data, str.c_str(), got);
}
return 1;
case M_EOI: /* in case it's a tables-only JPEG stream */
raise_warning("No image in jpeg!");
return (ImageInfo->sections_found&(~FOUND_COMPUTED)) ? 1 : 0;
case M_COM: /* Comment section */
exif_process_COM(ImageInfo, (char *)Data, itemlen);
break;
case M_EXIF:
if (!(ImageInfo->sections_found&FOUND_IFD0)) {
/*ImageInfo->sections_found |= FOUND_EXIF;*/
/* Seen files from some 'U-lead' software with Vivitar scanner
that uses marker 31 later in the file (no clue what for!) */
exif_process_APP1(ImageInfo, (char *)Data, itemlen, fpos);
}
break;
case M_APP12:
exif_process_APP12(ImageInfo, (char *)Data, itemlen);
break;
case M_SOF0:
case M_SOF1:
case M_SOF2:
case M_SOF3:
case M_SOF5:
case M_SOF6:
case M_SOF7:
case M_SOF9:
case M_SOF10:
case M_SOF11:
case M_SOF13:
case M_SOF14:
case M_SOF15:
if ((itemlen - 2) < 6) {
return 0;
}
exif_process_SOFn(Data, marker, &sof_info);
ImageInfo->Width = sof_info.width;
ImageInfo->Height = sof_info.height;
if (sof_info.num_components == 3) {
ImageInfo->IsColor = 1;
} else {
ImageInfo->IsColor = 0;
}
break;
default:
/* skip any other marker silently. */
break;
}
/* keep track of last marker */
last_marker = marker;
}
return 1;
} | static int exif_scan_JPEG_header(image_info_type *ImageInfo) {
int section, sn;
int marker = 0, last_marker = M_PSEUDO, comment_correction=1;
int ll, lh;
unsigned char *Data;
size_t fpos, size, got, itemlen;
jpeg_sof_info sof_info;
for(section=0;;section++) {
if (last_marker==M_COM && comment_correction) {
comment_correction = 2;
}
do {
if ((marker = ImageInfo->infile->getc()) == EOF) {
raise_warning("File structure corrupted");
return 0;
}
if (last_marker==M_COM && comment_correction>0) {
if (marker!=0xFF) {
marker = 0xff;
comment_correction--;
} else {
last_marker = M_PSEUDO;
}
}
} while (marker == 0xff);
if (last_marker==M_COM && !comment_correction) {
raise_notice("Image has corrupt COM section: some software set "
"wrong length information");
}
if (last_marker==M_COM && comment_correction)
return M_EOI;
fpos = ImageInfo->infile->tell();
if (marker == 0xff) {
raise_warning("To many padding bytes");
return 0;
}
if ((lh = ImageInfo->infile->getc()) == EOF) {
raise_warning("File structure corrupted");
return 0;
}
if ((ll = ImageInfo->infile->getc()) == EOF) {
raise_warning("File structure corrupted");
return 0;
}
itemlen = (lh << 8) | ll;
if (itemlen < 2) {
raise_warning("File structure corrupted");
return 0;
}
sn = exif_file_sections_add(ImageInfo, marker, itemlen+1, nullptr);
if (sn == -1) return 0;
Data = ImageInfo->file.list[sn].data;
Data[0] = (unsigned char)lh;
Data[1] = (unsigned char)ll;
String str = ImageInfo->infile->read(itemlen-2);
got = str.length();
if (got != itemlen-2) {
raise_warning("Error reading from file: "
"got=x%04lX(=%lu) != itemlen-2=x%04lX(=%lu)",
got, got, itemlen-2, itemlen-2);
return 0;
}
memcpy(Data+2, str.c_str(), got);
switch(marker) {
case M_SOS:
if (ImageInfo->read_all) {
fpos = ImageInfo->infile->tell();
size = ImageInfo->FileSize - fpos;
sn = exif_file_sections_add(ImageInfo, M_PSEUDO, size, nullptr);
if (sn == -1) return 0;
Data = ImageInfo->file.list[sn].data;
str = ImageInfo->infile->read(size);
got = str.length();
if (got != size) {
raise_warning("Unexpected end of file reached");
return 0;
}
memcpy(Data, str.c_str(), got);
}
return 1;
case M_EOI:
raise_warning("No image in jpeg!");
return (ImageInfo->sections_found&(~FOUND_COMPUTED)) ? 1 : 0;
case M_COM:
exif_process_COM(ImageInfo, (char *)Data, itemlen);
break;
case M_EXIF:
if (!(ImageInfo->sections_found&FOUND_IFD0)) {
exif_process_APP1(ImageInfo, (char *)Data, itemlen, fpos);
}
break;
case M_APP12:
exif_process_APP12(ImageInfo, (char *)Data, itemlen);
break;
case M_SOF0:
case M_SOF1:
case M_SOF2:
case M_SOF3:
case M_SOF5:
case M_SOF6:
case M_SOF7:
case M_SOF9:
case M_SOF10:
case M_SOF11:
case M_SOF13:
case M_SOF14:
case M_SOF15:
if ((itemlen - 2) < 6) {
return 0;
}
exif_process_SOFn(Data, marker, &sof_info);
ImageInfo->Width = sof_info.width;
ImageInfo->Height = sof_info.height;
if (sof_info.num_components == 3) {
ImageInfo->IsColor = 1;
} else {
ImageInfo->IsColor = 0;
}
break;
default:
break;
}
last_marker = marker;
}
return 1;
} | 1,345 |
0 | static void _tiffUnmapProc ( thandle_t fd , void * base , toff_t size ) {
( void ) fd ;
( void ) size ;
UnmapViewOfFile ( base ) ;
} | static void _tiffUnmapProc ( thandle_t fd , void * base , toff_t size ) {
( void ) fd ;
( void ) size ;
UnmapViewOfFile ( base ) ;
} | 1,346 |
0 | void qerror_report_internal(const char *file, int linenr, const char *func, const char *fmt, ...) { va_list va; QError *qerror; va_start(va, fmt); qerror = qerror_from_info(file, linenr, func, fmt, &va); va_end(va); if (cur_mon) { monitor_set_error(cur_mon, qerror); } else { qerror_print(qerror); QDECREF(qerror); } } | void qerror_report_internal(const char *file, int linenr, const char *func, const char *fmt, ...) { va_list va; QError *qerror; va_start(va, fmt); qerror = qerror_from_info(file, linenr, func, fmt, &va); va_end(va); if (cur_mon) { monitor_set_error(cur_mon, qerror); } else { qerror_print(qerror); QDECREF(qerror); } } | 1,347 |
1 | static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
if (cpuhw->n_limited)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
perf_read_regs(regs);
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
val = read_pmc(event->hw.idx);
if ((int)val < 0) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs, nmi);
}
}
/*
* In case we didn't find and reset the event that caused
* the interrupt, scan all events and reset any that are
* negative, to avoid getting continual interrupts.
* Any that we processed in the previous loop will not be negative.
*/
if (!found) {
for (i = 0; i < ppmu->n_counter; ++i) {
if (is_limited_pmc(i + 1))
continue;
val = read_pmc(i + 1);
if ((int)val < 0)
write_pmc(i + 1, 0);
}
}
/*
* Reset MMCR0 to its normal value. This will set PMXE and
* clear FC (freeze counters) and PMAO (perf mon alert occurred)
* and thus allow interrupts to occur again.
* XXX might want to use MSR.PM to keep the events frozen until
* we get back out of this interrupt.
*/
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
if (nmi)
nmi_exit();
else
irq_exit();
} | static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
if (cpuhw->n_limited)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
perf_read_regs(regs);
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
val = read_pmc(event->hw.idx);
if ((int)val < 0) {
found = 1;
record_and_restart(event, val, regs, nmi);
}
}
if (!found) {
for (i = 0; i < ppmu->n_counter; ++i) {
if (is_limited_pmc(i + 1))
continue;
val = read_pmc(i + 1);
if ((int)val < 0)
write_pmc(i + 1, 0);
}
}
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
if (nmi)
nmi_exit();
else
irq_exit();
} | 1,348 |
1 | static void exif_process_APP12(image_info_type *ImageInfo,
char *buffer, size_t length) {
size_t l1, l2=0;
if ((l1 = php_strnlen(buffer+2, length-2)) > 0) {
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Company",
TAG_NONE, TAG_FMT_STRING, l1, buffer+2);
if (length > 2+l1+1) {
l2 = php_strnlen(buffer+2+l1+1, length-2-l1+1);
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Info",
TAG_NONE, TAG_FMT_STRING, l2, buffer+2+l1+1);
}
}
} | static void exif_process_APP12(image_info_type *ImageInfo,
char *buffer, size_t length) {
size_t l1, l2=0;
if ((l1 = php_strnlen(buffer+2, length-2)) > 0) {
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Company",
TAG_NONE, TAG_FMT_STRING, l1, buffer+2);
if (length > 2+l1+1) {
l2 = php_strnlen(buffer+2+l1+1, length-2-l1+1);
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Info",
TAG_NONE, TAG_FMT_STRING, l2, buffer+2+l1+1);
}
}
} | 1,349 |
0 | static void qdev_prop_set(DeviceState *dev, const char *name, void *src, enum PropertyType type) { Property *prop; prop = qdev_prop_find(dev, name); if (!prop) { fprintf(stderr, "%s: property \"%s.%s\" not found\n", __FUNCTION__, object_get_typename(OBJECT(dev)), name); abort(); } if (prop->info->type != type) { fprintf(stderr, "%s: property \"%s.%s\" type mismatch\n", __FUNCTION__, object_get_typename(OBJECT(dev)), name); abort(); } qdev_prop_cpy(dev, prop, src); } | static void qdev_prop_set(DeviceState *dev, const char *name, void *src, enum PropertyType type) { Property *prop; prop = qdev_prop_find(dev, name); if (!prop) { fprintf(stderr, "%s: property \"%s.%s\" not found\n", __FUNCTION__, object_get_typename(OBJECT(dev)), name); abort(); } if (prop->info->type != type) { fprintf(stderr, "%s: property \"%s.%s\" type mismatch\n", __FUNCTION__, object_get_typename(OBJECT(dev)), name); abort(); } qdev_prop_cpy(dev, prop, src); } | 1,351 |
0 | static void exif_process_APP12(image_info_type *ImageInfo,
char *buffer, size_t length) {
size_t l1, l2=0;
if ((l1 = php_strnlen(buffer+2, length-2)) > 0) {
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Company",
TAG_NONE, TAG_FMT_STRING, l1, buffer+2);
if (length > 2+l1+1) {
l2 = php_strnlen(buffer+2+l1+1, length-2-l1-1);
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Info",
TAG_NONE, TAG_FMT_STRING, l2, buffer+2+l1+1);
}
}
} | static void exif_process_APP12(image_info_type *ImageInfo,
char *buffer, size_t length) {
size_t l1, l2=0;
if ((l1 = php_strnlen(buffer+2, length-2)) > 0) {
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Company",
TAG_NONE, TAG_FMT_STRING, l1, buffer+2);
if (length > 2+l1+1) {
l2 = php_strnlen(buffer+2+l1+1, length-2-l1-1);
exif_iif_add_tag(ImageInfo, SECTION_APP12, "Info",
TAG_NONE, TAG_FMT_STRING, l2, buffer+2+l1+1);
}
}
} | 1,352 |
1 | size_t util_path_encode(char *s, size_t len)
{
char t[(len * 3)+1];
size_t i, j;
for (i = 0, j = 0; s[i] != '\0'; i++) {
if (s[i] == '/') {
memcpy(&t[j], "\\x2f", 4);
j += 4;
} else if (s[i] == '\\') {
memcpy(&t[j], "\\x5c", 4);
j += 4;
} else {
t[j] = s[i];
j++;
}
}
if (len == 0)
return j;
i = (j < len - 1) ? j : len - 1;
memcpy(s, t, i);
s[i] = '\0';
return j;
} | size_t util_path_encode(char *s, size_t len)
{
char t[(len * 3)+1];
size_t i, j;
for (i = 0, j = 0; s[i] != '\0'; i++) {
if (s[i] == '/') {
memcpy(&t[j], "\\x2f", 4);
j += 4;
} else if (s[i] == '\\') {
memcpy(&t[j], "\\x5c", 4);
j += 4;
} else {
t[j] = s[i];
j++;
}
}
if (len == 0)
return j;
i = (j < len - 1) ? j : len - 1;
memcpy(s, t, i);
s[i] = '\0';
return j;
} | 1,353 |
1 | void vp9_iht4x4_16_add_c ( const int16_t * input , uint8_t * dest , int stride , int tx_type ) {
const transform_2d IHT_4 [ ] = {
{
idct4 , idct4 }
, {
iadst4 , idct4 }
, {
idct4 , iadst4 }
, {
iadst4 , iadst4 }
}
;
int i , j ;
int16_t out [ 4 * 4 ] ;
int16_t * outptr = out ;
int16_t temp_in [ 4 ] , temp_out [ 4 ] ;
for ( i = 0 ;
i < 4 ;
++ i ) {
IHT_4 [ tx_type ] . rows ( input , outptr ) ;
input += 4 ;
outptr += 4 ;
}
for ( i = 0 ;
i < 4 ;
++ i ) {
for ( j = 0 ;
j < 4 ;
++ j ) temp_in [ j ] = out [ j * 4 + i ] ;
IHT_4 [ tx_type ] . cols ( temp_in , temp_out ) ;
for ( j = 0 ;
j < 4 ;
++ j ) dest [ j * stride + i ] = clip_pixel ( ROUND_POWER_OF_TWO ( temp_out [ j ] , 4 ) + dest [ j * stride + i ] ) ;
}
} | void vp9_iht4x4_16_add_c ( const int16_t * input , uint8_t * dest , int stride , int tx_type ) {
const transform_2d IHT_4 [ ] = {
{
idct4 , idct4 }
, {
iadst4 , idct4 }
, {
idct4 , iadst4 }
, {
iadst4 , iadst4 }
}
;
int i , j ;
int16_t out [ 4 * 4 ] ;
int16_t * outptr = out ;
int16_t temp_in [ 4 ] , temp_out [ 4 ] ;
for ( i = 0 ;
i < 4 ;
++ i ) {
IHT_4 [ tx_type ] . rows ( input , outptr ) ;
input += 4 ;
outptr += 4 ;
}
for ( i = 0 ;
i < 4 ;
++ i ) {
for ( j = 0 ;
j < 4 ;
++ j ) temp_in [ j ] = out [ j * 4 + i ] ;
IHT_4 [ tx_type ] . cols ( temp_in , temp_out ) ;
for ( j = 0 ;
j < 4 ;
++ j ) dest [ j * stride + i ] = clip_pixel ( ROUND_POWER_OF_TWO ( temp_out [ j ] , 4 ) + dest [ j * stride + i ] ) ;
}
} | 1,354 |
1 | void async_request(TALLOC_CTX *mem_ctx, struct winbindd_child *child,
struct winbindd_request *request,
struct winbindd_response *response,
void (*continuation)(void *private_data, BOOL success),
void *private_data)
{
struct winbindd_async_request *state;
SMB_ASSERT(continuation != NULL);
state = TALLOC_P(mem_ctx, struct winbindd_async_request);
if (state == NULL) {
DEBUG(0, ("talloc failed\n"));
continuation(private_data, False);
return;
}
state->mem_ctx = mem_ctx;
state->child = child;
state->request = request;
state->response = response;
state->continuation = continuation;
state->private_data = private_data;
DLIST_ADD_END(child->requests, state, struct winbindd_async_request *);
schedule_async_request(child);
return;
} | void async_request(TALLOC_CTX *mem_ctx, struct winbindd_child *child,
struct winbindd_request *request,
struct winbindd_response *response,
void (*continuation)(void *private_data, BOOL success),
void *private_data)
{
struct winbindd_async_request *state;
SMB_ASSERT(continuation != NULL);
state = TALLOC_P(mem_ctx, struct winbindd_async_request);
if (state == NULL) {
DEBUG(0, ("talloc failed\n"));
continuation(private_data, False);
return;
}
state->mem_ctx = mem_ctx;
state->child = child;
state->request = request;
state->response = response;
state->continuation = continuation;
state->private_data = private_data;
DLIST_ADD_END(child->requests, state, struct winbindd_async_request *);
schedule_async_request(child);
return;
} | 1,356 |
0 | void qdev_prop_set_drive_nofail(DeviceState *dev, const char *name, BlockDriverState *value) { if (qdev_prop_set_drive(dev, name, value) < 0) { exit(1); } } | void qdev_prop_set_drive_nofail(DeviceState *dev, const char *name, BlockDriverState *value) { if (qdev_prop_set_drive(dev, name, value) < 0) { exit(1); } } | 1,357 |
0 | static inline void x86_assign_hw_event(struct perf_event *event,
struct cpu_hw_events *cpuc, int i)
{
struct hw_perf_event *hwc = &event->hw;
hwc->idx = cpuc->assign[i];
hwc->last_cpu = smp_processor_id();
hwc->last_tag = ++cpuc->tags[i];
if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
hwc->config_base = 0;
hwc->event_base = 0;
} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
} else {
hwc->config_base = x86_pmu_config_addr(hwc->idx);
hwc->event_base = x86_pmu_event_addr(hwc->idx);
}
} | static inline void x86_assign_hw_event(struct perf_event *event,
struct cpu_hw_events *cpuc, int i)
{
struct hw_perf_event *hwc = &event->hw;
hwc->idx = cpuc->assign[i];
hwc->last_cpu = smp_processor_id();
hwc->last_tag = ++cpuc->tags[i];
if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
hwc->config_base = 0;
hwc->event_base = 0;
} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
} else {
hwc->config_base = x86_pmu_config_addr(hwc->idx);
hwc->event_base = x86_pmu_event_addr(hwc->idx);
}
} | 1,359 |
1 | static void async_main_request_sent(void *private_data, BOOL success)
{
struct winbindd_async_request *state =
talloc_get_type_abort(private_data, struct winbindd_async_request);
if (!success) {
DEBUG(5, ("Could not send async request\n"));
state->response->length = sizeof(struct winbindd_response);
state->response->result = WINBINDD_ERROR;
state->continuation(state->private_data, False);
return;
}
if (state->request->extra_len == 0) {
async_request_sent(private_data, True);
return;
}
setup_async_write(&state->child->event, state->request->extra_data.data,
state->request->extra_len,
async_request_sent, state);
} | static void async_main_request_sent(void *private_data, BOOL success)
{
struct winbindd_async_request *state =
talloc_get_type_abort(private_data, struct winbindd_async_request);
if (!success) {
DEBUG(5, ("Could not send async request\n"));
state->response->length = sizeof(struct winbindd_response);
state->response->result = WINBINDD_ERROR;
state->continuation(state->private_data, False);
return;
}
if (state->request->extra_len == 0) {
async_request_sent(private_data, True);
return;
}
setup_async_write(&state->child->event, state->request->extra_data.data,
state->request->extra_len,
async_request_sent, state);
} | 1,360 |
0 | static int decode ( MimicContext * ctx , int quality , int num_coeffs , int is_iframe ) {
int ret , y , x , plane , cur_row = 0 ;
for ( plane = 0 ;
plane < 3 ;
plane ++ ) {
const int is_chroma = ! ! plane ;
const int qscale = av_clip ( 10000 - quality , is_chroma ? 1000 : 2000 , 10000 ) << 2 ;
const int stride = ctx -> flipped_ptrs [ ctx -> cur_index ] . linesize [ plane ] ;
const uint8_t * src = ctx -> flipped_ptrs [ ctx -> prev_index ] . data [ plane ] ;
uint8_t * dst = ctx -> flipped_ptrs [ ctx -> cur_index ] . data [ plane ] ;
for ( y = 0 ;
y < ctx -> num_vblocks [ plane ] ;
y ++ ) {
for ( x = 0 ;
x < ctx -> num_hblocks [ plane ] ;
x ++ ) {
if ( is_iframe || get_bits1 ( & ctx -> gb ) == is_chroma ) {
if ( is_chroma || is_iframe || ! get_bits1 ( & ctx -> gb ) ) {
if ( ( ret = vlc_decode_block ( ctx , num_coeffs , qscale ) ) < 0 ) {
av_log ( ctx -> avctx , AV_LOG_ERROR , "Error decoding " "block.\n" ) ;
return ret ;
}
ctx -> dsp . idct_put ( dst , stride , ctx -> dct_block ) ;
}
else {
unsigned int backref = get_bits ( & ctx -> gb , 4 ) ;
int index = ( ctx -> cur_index + backref ) & 15 ;
uint8_t * p = ctx -> flipped_ptrs [ index ] . data [ 0 ] ;
if ( index != ctx -> cur_index && p ) {
ff_thread_await_progress ( & ctx -> frames [ index ] , cur_row , 0 ) ;
p += src - ctx -> flipped_ptrs [ ctx -> prev_index ] . data [ plane ] ;
ctx -> dsp . put_pixels_tab [ 1 ] [ 0 ] ( dst , p , stride , 8 ) ;
}
else {
av_log ( ctx -> avctx , AV_LOG_ERROR , "No such backreference! Buggy sample.\n" ) ;
}
}
}
else {
ff_thread_await_progress ( & ctx -> frames [ ctx -> prev_index ] , cur_row , 0 ) ;
ctx -> dsp . put_pixels_tab [ 1 ] [ 0 ] ( dst , src , stride , 8 ) ;
}
src += 8 ;
dst += 8 ;
}
src += ( stride - ctx -> num_hblocks [ plane ] ) << 3 ;
dst += ( stride - ctx -> num_hblocks [ plane ] ) << 3 ;
ff_thread_report_progress ( & ctx -> frames [ ctx -> cur_index ] , cur_row ++ , 0 ) ;
}
}
return 0 ;
} | static int decode ( MimicContext * ctx , int quality , int num_coeffs , int is_iframe ) {
int ret , y , x , plane , cur_row = 0 ;
for ( plane = 0 ;
plane < 3 ;
plane ++ ) {
const int is_chroma = ! ! plane ;
const int qscale = av_clip ( 10000 - quality , is_chroma ? 1000 : 2000 , 10000 ) << 2 ;
const int stride = ctx -> flipped_ptrs [ ctx -> cur_index ] . linesize [ plane ] ;
const uint8_t * src = ctx -> flipped_ptrs [ ctx -> prev_index ] . data [ plane ] ;
uint8_t * dst = ctx -> flipped_ptrs [ ctx -> cur_index ] . data [ plane ] ;
for ( y = 0 ;
y < ctx -> num_vblocks [ plane ] ;
y ++ ) {
for ( x = 0 ;
x < ctx -> num_hblocks [ plane ] ;
x ++ ) {
if ( is_iframe || get_bits1 ( & ctx -> gb ) == is_chroma ) {
if ( is_chroma || is_iframe || ! get_bits1 ( & ctx -> gb ) ) {
if ( ( ret = vlc_decode_block ( ctx , num_coeffs , qscale ) ) < 0 ) {
av_log ( ctx -> avctx , AV_LOG_ERROR , "Error decoding " "block.\n" ) ;
return ret ;
}
ctx -> dsp . idct_put ( dst , stride , ctx -> dct_block ) ;
}
else {
unsigned int backref = get_bits ( & ctx -> gb , 4 ) ;
int index = ( ctx -> cur_index + backref ) & 15 ;
uint8_t * p = ctx -> flipped_ptrs [ index ] . data [ 0 ] ;
if ( index != ctx -> cur_index && p ) {
ff_thread_await_progress ( & ctx -> frames [ index ] , cur_row , 0 ) ;
p += src - ctx -> flipped_ptrs [ ctx -> prev_index ] . data [ plane ] ;
ctx -> dsp . put_pixels_tab [ 1 ] [ 0 ] ( dst , p , stride , 8 ) ;
}
else {
av_log ( ctx -> avctx , AV_LOG_ERROR , "No such backreference! Buggy sample.\n" ) ;
}
}
}
else {
ff_thread_await_progress ( & ctx -> frames [ ctx -> prev_index ] , cur_row , 0 ) ;
ctx -> dsp . put_pixels_tab [ 1 ] [ 0 ] ( dst , src , stride , 8 ) ;
}
src += 8 ;
dst += 8 ;
}
src += ( stride - ctx -> num_hblocks [ plane ] ) << 3 ;
dst += ( stride - ctx -> num_hblocks [ plane ] ) << 3 ;
ff_thread_report_progress ( & ctx -> frames [ ctx -> cur_index ] , cur_row ++ , 0 ) ;
}
}
return 0 ;
} | 1,361 |
0 | static void unset_dirty_tracking(void) { BlkMigDevState *bmds; QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { aio_context_acquire(blk_get_aio_context(bmds->blk)); bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap); aio_context_release(blk_get_aio_context(bmds->blk)); } } | static void unset_dirty_tracking(void) { BlkMigDevState *bmds; QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { aio_context_acquire(blk_get_aio_context(bmds->blk)); bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap); aio_context_release(blk_get_aio_context(bmds->blk)); } } | 1,363 |
0 | SPL_METHOD ( SplFileInfo , func_name ) \ {
\ spl_filesystem_object * intern = ( spl_filesystem_object * ) zend_object_store_get_object ( getThis ( ) TSRMLS_CC ) ;
\ zend_error_handling error_handling ;
\ if ( zend_parse_parameters_none ( ) == FAILURE ) {
\ return ;
\ }
\ \ zend_replace_error_handling ( EH_THROW , spl_ce_RuntimeException , & error_handling TSRMLS_CC ) ;
\ spl_filesystem_object_get_file_name ( intern TSRMLS_CC ) ;
\ php_stat ( intern -> file_name , intern -> file_name_len , func_num , return_value TSRMLS_CC ) ;
\ zend_restore_error_handling ( & error_handling TSRMLS_CC ) ;
\ }
FileInfoFunction ( getPerms , FS_PERMS ) FileInfoFunction ( getInode , FS_INODE ) FileInfoFunction ( getSize , FS_SIZE ) FileInfoFunction ( getOwner , FS_OWNER ) FileInfoFunction ( getGroup , FS_GROUP ) FileInfoFunction ( getATime , FS_ATIME ) FileInfoFunction ( getMTime , FS_MTIME ) FileInfoFunction ( getCTime , FS_CTIME ) FileInfoFunction ( getType , FS_TYPE ) FileInfoFunction ( isWritable , FS_IS_W ) FileInfoFunction ( isReadable , FS_IS_R ) FileInfoFunction ( isExecutable , FS_IS_X ) FileInfoFunction ( isFile , FS_IS_FILE ) FileInfoFunction ( isDir , FS_IS_DIR ) FileInfoFunction ( isLink , FS_IS_LINK ) | SPL_METHOD ( SplFileInfo , func_name ) \ {
\ spl_filesystem_object * intern = ( spl_filesystem_object * ) zend_object_store_get_object ( getThis ( ) TSRMLS_CC ) ;
\ zend_error_handling error_handling ;
\ if ( zend_parse_parameters_none ( ) == FAILURE ) {
\ return ;
\ }
\ \ zend_replace_error_handling ( EH_THROW , spl_ce_RuntimeException , & error_handling TSRMLS_CC ) ;
\ spl_filesystem_object_get_file_name ( intern TSRMLS_CC ) ;
\ php_stat ( intern -> file_name , intern -> file_name_len , func_num , return_value TSRMLS_CC ) ;
\ zend_restore_error_handling ( & error_handling TSRMLS_CC ) ;
\ }
FileInfoFunction ( getPerms , FS_PERMS ) FileInfoFunction ( getInode , FS_INODE ) FileInfoFunction ( getSize , FS_SIZE ) FileInfoFunction ( getOwner , FS_OWNER ) FileInfoFunction ( getGroup , FS_GROUP ) FileInfoFunction ( getATime , FS_ATIME ) FileInfoFunction ( getMTime , FS_MTIME ) FileInfoFunction ( getCTime , FS_CTIME ) FileInfoFunction ( getType , FS_TYPE ) FileInfoFunction ( isWritable , FS_IS_W ) FileInfoFunction ( isReadable , FS_IS_R ) FileInfoFunction ( isExecutable , FS_IS_X ) FileInfoFunction ( isFile , FS_IS_FILE ) FileInfoFunction ( isDir , FS_IS_DIR ) FileInfoFunction ( isLink , FS_IS_LINK ) | 1,364 |
1 | static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_msr_entry *msr;
u64 host_tsc;
int ret = 0;
switch (msr_index) {
#ifdef CONFIG_X86_64
case MSR_EFER:
vmx_load_host_state(vmx);
ret = kvm_set_msr_common(vcpu, msr_index, data);
break;
case MSR_FS_BASE:
vmcs_writel(GUEST_FS_BASE, data);
break;
case MSR_GS_BASE:
vmcs_writel(GUEST_GS_BASE, data);
break;
#endif
case MSR_IA32_SYSENTER_CS:
vmcs_write32(GUEST_SYSENTER_CS, data);
break;
case MSR_IA32_SYSENTER_EIP:
vmcs_writel(GUEST_SYSENTER_EIP, data);
break;
case MSR_IA32_SYSENTER_ESP:
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_TIME_STAMP_COUNTER:
rdtscll(host_tsc);
guest_write_tsc(data, host_tsc);
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
/*
* Just discard all writes to the performance counters; this
* should keep both older linux and windows 64-bit guests
* happy
*/
pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data);
break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
vmcs_write64(GUEST_IA32_PAT, data);
vcpu->arch.pat = data;
break;
}
/* Otherwise falls through to kvm_set_msr_common */
default:
vmx_load_host_state(vmx);
msr = find_msr_entry(vmx, msr_index);
if (msr) {
msr->data = data;
break;
}
ret = kvm_set_msr_common(vcpu, msr_index, data);
}
return ret;
} | static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_msr_entry *msr;
u64 host_tsc;
int ret = 0;
switch (msr_index) {
#ifdef CONFIG_X86_64
case MSR_EFER:
vmx_load_host_state(vmx);
ret = kvm_set_msr_common(vcpu, msr_index, data);
break;
case MSR_FS_BASE:
vmcs_writel(GUEST_FS_BASE, data);
break;
case MSR_GS_BASE:
vmcs_writel(GUEST_GS_BASE, data);
break;
#endif
case MSR_IA32_SYSENTER_CS:
vmcs_write32(GUEST_SYSENTER_CS, data);
break;
case MSR_IA32_SYSENTER_EIP:
vmcs_writel(GUEST_SYSENTER_EIP, data);
break;
case MSR_IA32_SYSENTER_ESP:
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_TIME_STAMP_COUNTER:
rdtscll(host_tsc);
guest_write_tsc(data, host_tsc);
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data);
break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
vmcs_write64(GUEST_IA32_PAT, data);
vcpu->arch.pat = data;
break;
}
default:
vmx_load_host_state(vmx);
msr = find_msr_entry(vmx, msr_index);
if (msr) {
msr->data = data;
break;
}
ret = kvm_set_msr_common(vcpu, msr_index, data);
}
return ret;
} | 1,365 |
0 | Variant HHVM_FUNCTION(apc_add,
const Variant& key_or_array,
const Variant& var /* = null */,
int64_t ttl /* = 0 */) {
if (!apcExtension::Enable) return false;
if (key_or_array.isArray()) {
Array valuesArr = key_or_array.toArray();
// errors stores all keys corresponding to entries that could not be cached
ArrayInit errors(valuesArr.size(), ArrayInit::Map{});
for (ArrayIter iter(valuesArr); iter; ++iter) {
Variant key = iter.first();
if (!key.isString()) {
throw_invalid_argument("apc key: (not a string)");
return false;
}
Variant v = iter.second();
auto const& strKey = key.toCStrRef();
if (isKeyInvalid(strKey)) {
throw_invalid_argument("apc key: (contains invalid characters)");
return false;
}
if (!apc_store().add(strKey, v, ttl)) {
errors.add(strKey, -1);
}
}
return errors.toVariant();
}
if (!key_or_array.isString()) {
throw_invalid_argument("apc key: (not a string)");
return false;
}
String strKey = key_or_array.toString();
if (isKeyInvalid(strKey)) {
throw_invalid_argument("apc key: (contains invalid characters)");
return false;
}
return apc_store().add(strKey, var, ttl);
} | Variant HHVM_FUNCTION(apc_add,
const Variant& key_or_array,
const Variant& var ,
int64_t ttl ) {
if (!apcExtension::Enable) return false;
if (key_or_array.isArray()) {
Array valuesArr = key_or_array.toArray();
ArrayInit errors(valuesArr.size(), ArrayInit::Map{});
for (ArrayIter iter(valuesArr); iter; ++iter) {
Variant key = iter.first();
if (!key.isString()) {
throw_invalid_argument("apc key: (not a string)");
return false;
}
Variant v = iter.second();
auto const& strKey = key.toCStrRef();
if (isKeyInvalid(strKey)) {
throw_invalid_argument("apc key: (contains invalid characters)");
return false;
}
if (!apc_store().add(strKey, v, ttl)) {
errors.add(strKey, -1);
}
}
return errors.toVariant();
}
if (!key_or_array.isString()) {
throw_invalid_argument("apc key: (not a string)");
return false;
}
String strKey = key_or_array.toString();
if (isKeyInvalid(strKey)) {
throw_invalid_argument("apc key: (contains invalid characters)");
return false;
}
return apc_store().add(strKey, var, ttl);
} | 1,366 |
1 | static inline void x86_assign_hw_event(struct perf_event *event,
struct cpu_hw_events *cpuc, int i)
{
struct hw_perf_event *hwc = &event->hw;
hwc->idx = cpuc->assign[i];
hwc->last_cpu = smp_processor_id();
hwc->last_tag = ++cpuc->tags[i];
if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
hwc->config_base = 0;
hwc->event_base = 0;
} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0;
} else {
hwc->config_base = x86_pmu_config_addr(hwc->idx);
hwc->event_base = x86_pmu_event_addr(hwc->idx);
}
} | static inline void x86_assign_hw_event(struct perf_event *event,
struct cpu_hw_events *cpuc, int i)
{
struct hw_perf_event *hwc = &event->hw;
hwc->idx = cpuc->assign[i];
hwc->last_cpu = smp_processor_id();
hwc->last_tag = ++cpuc->tags[i];
if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
hwc->config_base = 0;
hwc->event_base = 0;
} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0;
} else {
hwc->config_base = x86_pmu_config_addr(hwc->idx);
hwc->event_base = x86_pmu_event_addr(hwc->idx);
}
} | 1,367 |
1 | Variant HHVM_FUNCTION(apc_add,
const Variant& key_or_array,
const Variant& var /* = null */,
int64_t ttl /* = 0 */) {
if (!apcExtension::Enable) return false;
if (key_or_array.isArray()) {
Array valuesArr = key_or_array.toArray();
// errors stores all keys corresponding to entries that could not be cached
ArrayInit errors(valuesArr.size(), ArrayInit::Map{});
for (ArrayIter iter(valuesArr); iter; ++iter) {
Variant key = iter.first();
if (!key.isString()) {
throw_invalid_argument("apc key: (not a string)");
return false;
}
Variant v = iter.second();
if (!apc_store().add(key.toString(), v, ttl)) {
errors.add(key, -1);
}
}
return errors.toVariant();
}
if (!key_or_array.isString()) {
throw_invalid_argument("apc key: (not a string)");
return false;
}
String strKey = key_or_array.toString();
return apc_store().add(strKey, var, ttl);
} | Variant HHVM_FUNCTION(apc_add,
const Variant& key_or_array,
const Variant& var ,
int64_t ttl ) {
if (!apcExtension::Enable) return false;
if (key_or_array.isArray()) {
Array valuesArr = key_or_array.toArray();
ArrayInit errors(valuesArr.size(), ArrayInit::Map{});
for (ArrayIter iter(valuesArr); iter; ++iter) {
Variant key = iter.first();
if (!key.isString()) {
throw_invalid_argument("apc key: (not a string)");
return false;
}
Variant v = iter.second();
if (!apc_store().add(key.toString(), v, ttl)) {
errors.add(key, -1);
}
}
return errors.toVariant();
}
if (!key_or_array.isString()) {
throw_invalid_argument("apc key: (not a string)");
return false;
}
String strKey = key_or_array.toString();
return apc_store().add(strKey, var, ttl);
} | 1,368 |
1 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
siginfo_t __user *, uinfo)
{
siginfo_t info;
if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
return -EFAULT;
/* Not even root can pretend to send signals from the kernel.
Nor can they impersonate a kill(), which adds source info. */
if (info.si_code >= 0)
return -EPERM;
info.si_signo = sig;
/* POSIX.1b doesn't mention process groups. */
return kill_proc_info(sig, &info, pid);
} | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
siginfo_t __user *, uinfo)
{
siginfo_t info;
if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
return -EFAULT;
if (info.si_code >= 0)
return -EPERM;
info.si_signo = sig;
return kill_proc_info(sig, &info, pid);
} | 1,369 |
1 | static bool acl_group_override(connection_struct *conn,
gid_t prim_gid,
const char *fname)
{
SMB_STRUCT_STAT sbuf;
if ((errno != EPERM) && (errno != EACCES)) {
return false;
}
/* file primary group == user primary or supplementary group */
if (lp_acl_group_control(SNUM(conn)) &&
current_user_in_group(prim_gid)) {
return true;
}
/* user has writeable permission */
if (lp_dos_filemode(SNUM(conn)) &&
can_write_to_file(conn, fname, &sbuf)) {
return true;
}
return false;
} | static bool acl_group_override(connection_struct *conn,
gid_t prim_gid,
const char *fname)
{
SMB_STRUCT_STAT sbuf;
if ((errno != EPERM) && (errno != EACCES)) {
return false;
}
if (lp_acl_group_control(SNUM(conn)) &&
current_user_in_group(prim_gid)) {
return true;
}
if (lp_dos_filemode(SNUM(conn)) &&
can_write_to_file(conn, fname, &sbuf)) {
return true;
}
return false;
} | 1,371 |
1 | static bool set_canon_ace_list(files_struct *fsp, canon_ace *the_ace, bool default_ace, gid_t prim_gid, bool *pacl_set_support)
{
connection_struct *conn = fsp->conn;
bool ret = False;
SMB_ACL_T the_acl = SMB_VFS_SYS_ACL_INIT(conn, (int)count_canon_ace_list(the_ace) + 1);
canon_ace *p_ace;
int i;
SMB_ACL_ENTRY_T mask_entry;
bool got_mask_entry = False;
SMB_ACL_PERMSET_T mask_permset;
SMB_ACL_TYPE_T the_acl_type = (default_ace ? SMB_ACL_TYPE_DEFAULT : SMB_ACL_TYPE_ACCESS);
bool needs_mask = False;
mode_t mask_perms = 0;
#if defined(POSIX_ACL_NEEDS_MASK)
/* HP-UX always wants to have a mask (called "class" there). */
needs_mask = True;
#endif
if (the_acl == NULL) {
if (!no_acl_syscall_error(errno)) {
/*
* Only print this error message if we have some kind of ACL
* support that's not working. Otherwise we would always get this.
*/
DEBUG(0,("set_canon_ace_list: Unable to init %s ACL. (%s)\n",
default_ace ? "default" : "file", strerror(errno) ));
}
*pacl_set_support = False;
return False;
}
if( DEBUGLVL( 10 )) {
dbgtext("set_canon_ace_list: setting ACL:\n");
for (i = 0, p_ace = the_ace; p_ace; p_ace = p_ace->next, i++ ) {
print_canon_ace( p_ace, i);
}
}
for (i = 0, p_ace = the_ace; p_ace; p_ace = p_ace->next, i++ ) {
SMB_ACL_ENTRY_T the_entry;
SMB_ACL_PERMSET_T the_permset;
/*
* ACLs only "need" an ACL_MASK entry if there are any named user or
* named group entries. But if there is an ACL_MASK entry, it applies
* to ACL_USER, ACL_GROUP, and ACL_GROUP_OBJ entries. Set the mask
* so that it doesn't deny (i.e., mask off) any permissions.
*/
if (p_ace->type == SMB_ACL_USER || p_ace->type == SMB_ACL_GROUP) {
needs_mask = True;
mask_perms |= p_ace->perms;
} else if (p_ace->type == SMB_ACL_GROUP_OBJ) {
mask_perms |= p_ace->perms;
}
/*
* Get the entry for this ACE.
*/
if (SMB_VFS_SYS_ACL_CREATE_ENTRY(conn, &the_acl, &the_entry) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to create entry %d. (%s)\n",
i, strerror(errno) ));
goto fail;
}
if (p_ace->type == SMB_ACL_MASK) {
mask_entry = the_entry;
got_mask_entry = True;
}
/*
* Ok - we now know the ACL calls should be working, don't
* allow fallback to chmod.
*/
*pacl_set_support = True;
/*
* Initialise the entry from the canon_ace.
*/
/*
* First tell the entry what type of ACE this is.
*/
if (SMB_VFS_SYS_ACL_SET_TAG_TYPE(conn, the_entry, p_ace->type) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to set tag type on entry %d. (%s)\n",
i, strerror(errno) ));
goto fail;
}
/*
* Only set the qualifier (user or group id) if the entry is a user
* or group id ACE.
*/
if ((p_ace->type == SMB_ACL_USER) || (p_ace->type == SMB_ACL_GROUP)) {
if (SMB_VFS_SYS_ACL_SET_QUALIFIER(conn, the_entry,(void *)&p_ace->unix_ug.uid) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to set qualifier on entry %d. (%s)\n",
i, strerror(errno) ));
goto fail;
}
}
/*
* Convert the mode_t perms in the canon_ace to a POSIX permset.
*/
if (SMB_VFS_SYS_ACL_GET_PERMSET(conn, the_entry, &the_permset) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to get permset on entry %d. (%s)\n",
i, strerror(errno) ));
goto fail;
}
if (map_acl_perms_to_permset(conn, p_ace->perms, &the_permset) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to create permset for mode (%u) on entry %d. (%s)\n",
(unsigned int)p_ace->perms, i, strerror(errno) ));
goto fail;
}
/*
* ..and apply them to the entry.
*/
if (SMB_VFS_SYS_ACL_SET_PERMSET(conn, the_entry, the_permset) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to add permset on entry %d. (%s)\n",
i, strerror(errno) ));
goto fail;
}
if( DEBUGLVL( 10 ))
print_canon_ace( p_ace, i);
}
if (needs_mask && !got_mask_entry) {
if (SMB_VFS_SYS_ACL_CREATE_ENTRY(conn, &the_acl, &mask_entry) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to create mask entry. (%s)\n", strerror(errno) ));
goto fail;
}
if (SMB_VFS_SYS_ACL_SET_TAG_TYPE(conn, mask_entry, SMB_ACL_MASK) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to set tag type on mask entry. (%s)\n",strerror(errno) ));
goto fail;
}
if (SMB_VFS_SYS_ACL_GET_PERMSET(conn, mask_entry, &mask_permset) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to get mask permset. (%s)\n", strerror(errno) ));
goto fail;
}
if (map_acl_perms_to_permset(conn, S_IRUSR|S_IWUSR|S_IXUSR, &mask_permset) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to create mask permset. (%s)\n", strerror(errno) ));
goto fail;
}
if (SMB_VFS_SYS_ACL_SET_PERMSET(conn, mask_entry, mask_permset) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to add mask permset. (%s)\n", strerror(errno) ));
goto fail;
}
}
/*
* Finally apply it to the file or directory.
*/
if(default_ace || fsp->is_directory || fsp->fh->fd == -1) {
if (SMB_VFS_SYS_ACL_SET_FILE(conn, fsp->fsp_name, the_acl_type, the_acl) == -1) {
/*
* Some systems allow all the above calls and only fail with no ACL support
* when attempting to apply the acl. HPUX with HFS is an example of this. JRA.
*/
if (no_acl_syscall_error(errno)) {
*pacl_set_support = False;
}
if (acl_group_override(conn, prim_gid, fsp->fsp_name)) {
int sret;
DEBUG(5,("set_canon_ace_list: acl group control on and current user in file %s primary group.\n",
fsp->fsp_name ));
become_root();
sret = SMB_VFS_SYS_ACL_SET_FILE(conn, fsp->fsp_name, the_acl_type, the_acl);
unbecome_root();
if (sret == 0) {
ret = True;
}
}
if (ret == False) {
DEBUG(2,("set_canon_ace_list: sys_acl_set_file type %s failed for file %s (%s).\n",
the_acl_type == SMB_ACL_TYPE_DEFAULT ? "directory default" : "file",
fsp->fsp_name, strerror(errno) ));
goto fail;
}
}
} else {
if (SMB_VFS_SYS_ACL_SET_FD(fsp, the_acl) == -1) {
/*
* Some systems allow all the above calls and only fail with no ACL support
* when attempting to apply the acl. HPUX with HFS is an example of this. JRA.
*/
if (no_acl_syscall_error(errno)) {
*pacl_set_support = False;
}
if (acl_group_override(conn, prim_gid, fsp->fsp_name)) {
int sret;
DEBUG(5,("set_canon_ace_list: acl group control on and current user in file %s primary group.\n",
fsp->fsp_name ));
become_root();
sret = SMB_VFS_SYS_ACL_SET_FD(fsp, the_acl);
unbecome_root();
if (sret == 0) {
ret = True;
}
}
if (ret == False) {
DEBUG(2,("set_canon_ace_list: sys_acl_set_file failed for file %s (%s).\n",
fsp->fsp_name, strerror(errno) ));
goto fail;
}
}
}
ret = True;
fail:
if (the_acl != NULL) {
SMB_VFS_SYS_ACL_FREE_ACL(conn, the_acl);
}
return ret;
} | static bool set_canon_ace_list(files_struct *fsp, canon_ace *the_ace, bool default_ace, gid_t prim_gid, bool *pacl_set_support)
{
connection_struct *conn = fsp->conn;
bool ret = False;
SMB_ACL_T the_acl = SMB_VFS_SYS_ACL_INIT(conn, (int)count_canon_ace_list(the_ace) + 1);
canon_ace *p_ace;
int i;
SMB_ACL_ENTRY_T mask_entry;
bool got_mask_entry = False;
SMB_ACL_PERMSET_T mask_permset;
SMB_ACL_TYPE_T the_acl_type = (default_ace ? SMB_ACL_TYPE_DEFAULT : SMB_ACL_TYPE_ACCESS);
bool needs_mask = False;
mode_t mask_perms = 0;
#if defined(POSIX_ACL_NEEDS_MASK)
needs_mask = True;
#endif
if (the_acl == NULL) {
if (!no_acl_syscall_error(errno)) {
DEBUG(0,("set_canon_ace_list: Unable to init %s ACL. (%s)\n",
default_ace ? "default" : "file", strerror(errno) ));
}
*pacl_set_support = False;
return False;
}
if( DEBUGLVL( 10 )) {
dbgtext("set_canon_ace_list: setting ACL:\n");
for (i = 0, p_ace = the_ace; p_ace; p_ace = p_ace->next, i++ ) {
print_canon_ace( p_ace, i);
}
}
for (i = 0, p_ace = the_ace; p_ace; p_ace = p_ace->next, i++ ) {
SMB_ACL_ENTRY_T the_entry;
SMB_ACL_PERMSET_T the_permset;
if (p_ace->type == SMB_ACL_USER || p_ace->type == SMB_ACL_GROUP) {
needs_mask = True;
mask_perms |= p_ace->perms;
} else if (p_ace->type == SMB_ACL_GROUP_OBJ) {
mask_perms |= p_ace->perms;
}
if (SMB_VFS_SYS_ACL_CREATE_ENTRY(conn, &the_acl, &the_entry) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to create entry %d. (%s)\n",
i, strerror(errno) ));
goto fail;
}
if (p_ace->type == SMB_ACL_MASK) {
mask_entry = the_entry;
got_mask_entry = True;
}
*pacl_set_support = True;
if (SMB_VFS_SYS_ACL_SET_TAG_TYPE(conn, the_entry, p_ace->type) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to set tag type on entry %d. (%s)\n",
i, strerror(errno) ));
goto fail;
}
if ((p_ace->type == SMB_ACL_USER) || (p_ace->type == SMB_ACL_GROUP)) {
if (SMB_VFS_SYS_ACL_SET_QUALIFIER(conn, the_entry,(void *)&p_ace->unix_ug.uid) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to set qualifier on entry %d. (%s)\n",
i, strerror(errno) ));
goto fail;
}
}
if (SMB_VFS_SYS_ACL_GET_PERMSET(conn, the_entry, &the_permset) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to get permset on entry %d. (%s)\n",
i, strerror(errno) ));
goto fail;
}
if (map_acl_perms_to_permset(conn, p_ace->perms, &the_permset) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to create permset for mode (%u) on entry %d. (%s)\n",
(unsigned int)p_ace->perms, i, strerror(errno) ));
goto fail;
}
if (SMB_VFS_SYS_ACL_SET_PERMSET(conn, the_entry, the_permset) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to add permset on entry %d. (%s)\n",
i, strerror(errno) ));
goto fail;
}
if( DEBUGLVL( 10 ))
print_canon_ace( p_ace, i);
}
if (needs_mask && !got_mask_entry) {
if (SMB_VFS_SYS_ACL_CREATE_ENTRY(conn, &the_acl, &mask_entry) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to create mask entry. (%s)\n", strerror(errno) ));
goto fail;
}
if (SMB_VFS_SYS_ACL_SET_TAG_TYPE(conn, mask_entry, SMB_ACL_MASK) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to set tag type on mask entry. (%s)\n",strerror(errno) ));
goto fail;
}
if (SMB_VFS_SYS_ACL_GET_PERMSET(conn, mask_entry, &mask_permset) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to get mask permset. (%s)\n", strerror(errno) ));
goto fail;
}
if (map_acl_perms_to_permset(conn, S_IRUSR|S_IWUSR|S_IXUSR, &mask_permset) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to create mask permset. (%s)\n", strerror(errno) ));
goto fail;
}
if (SMB_VFS_SYS_ACL_SET_PERMSET(conn, mask_entry, mask_permset) == -1) {
DEBUG(0,("set_canon_ace_list: Failed to add mask permset. (%s)\n", strerror(errno) ));
goto fail;
}
}
if(default_ace || fsp->is_directory || fsp->fh->fd == -1) {
if (SMB_VFS_SYS_ACL_SET_FILE(conn, fsp->fsp_name, the_acl_type, the_acl) == -1) {
if (no_acl_syscall_error(errno)) {
*pacl_set_support = False;
}
if (acl_group_override(conn, prim_gid, fsp->fsp_name)) {
int sret;
DEBUG(5,("set_canon_ace_list: acl group control on and current user in file %s primary group.\n",
fsp->fsp_name ));
become_root();
sret = SMB_VFS_SYS_ACL_SET_FILE(conn, fsp->fsp_name, the_acl_type, the_acl);
unbecome_root();
if (sret == 0) {
ret = True;
}
}
if (ret == False) {
DEBUG(2,("set_canon_ace_list: sys_acl_set_file type %s failed for file %s (%s).\n",
the_acl_type == SMB_ACL_TYPE_DEFAULT ? "directory default" : "file",
fsp->fsp_name, strerror(errno) ));
goto fail;
}
}
} else {
if (SMB_VFS_SYS_ACL_SET_FD(fsp, the_acl) == -1) {
if (no_acl_syscall_error(errno)) {
*pacl_set_support = False;
}
if (acl_group_override(conn, prim_gid, fsp->fsp_name)) {
int sret;
DEBUG(5,("set_canon_ace_list: acl group control on and current user in file %s primary group.\n",
fsp->fsp_name ));
become_root();
sret = SMB_VFS_SYS_ACL_SET_FD(fsp, the_acl);
unbecome_root();
if (sret == 0) {
ret = True;
}
}
if (ret == False) {
DEBUG(2,("set_canon_ace_list: sys_acl_set_file failed for file %s (%s).\n",
fsp->fsp_name, strerror(errno) ));
goto fail;
}
}
}
ret = True;
fail:
if (the_acl != NULL) {
SMB_VFS_SYS_ACL_FREE_ACL(conn, the_acl);
}
return ret;
} | 1,373 |
1 | void vp9_fht4x4_c(const int16_t *input, int16_t *output, int stride, int tx_type) {
  if (tx_type == DCT_DCT) {
    vp9_fdct4x4_c(input, output, stride);
  } else {
    int16_t out[4 * 4];
    int16_t *outptr = &out[0];
    int i, j;
    int16_t temp_in[4], temp_out[4];
    const transform_2d ht = FHT_4[tx_type];
    for (i = 0; i < 4; ++i) {
      for (j = 0; j < 4; ++j)
        temp_in[j] = input[j * stride + i] * 16;
      if (i == 0 && temp_in[0])
        temp_in[0] += 1;
      ht.cols(temp_in, temp_out);
      for (j = 0; j < 4; ++j)
        outptr[j * 4 + i] = temp_out[j];
    }
    for (i = 0; i < 4; ++i) {
      for (j = 0; j < 4; ++j)
        temp_in[j] = out[j + i * 4];
      ht.rows(temp_in, temp_out);
      for (j = 0; j < 4; ++j)
        output[j + i * 4] = (temp_out[j] + 1) >> 2;
    }
  }
} | void vp9_fht4x4_c(const int16_t *input, int16_t *output, int stride, int tx_type) {
  if (tx_type == DCT_DCT) {
    vp9_fdct4x4_c(input, output, stride);
  } else {
    int16_t out[4 * 4];
    int16_t *outptr = &out[0];
    int i, j;
    int16_t temp_in[4], temp_out[4];
    const transform_2d ht = FHT_4[tx_type];
    for (i = 0; i < 4; ++i) {
      for (j = 0; j < 4; ++j)
        temp_in[j] = input[j * stride + i] * 16;
      if (i == 0 && temp_in[0])
        temp_in[0] += 1;
      ht.cols(temp_in, temp_out);
      for (j = 0; j < 4; ++j)
        outptr[j * 4 + i] = temp_out[j];
    }
    for (i = 0; i < 4; ++i) {
      for (j = 0; j < 4; ++j)
        temp_in[j] = out[j + i * 4];
      ht.rows(temp_in, temp_out);
      for (j = 0; j < 4; ++j)
        output[j + i * 4] = (temp_out[j] + 1) >> 2;
    }
  }
} | 1,374
0 | Variant HHVM_FUNCTION(apc_store,
const Variant& key_or_array,
const Variant& var /* = null */,
int64_t ttl /* = 0 */) {
if (!apcExtension::Enable) return Variant(false);
if (key_or_array.isArray()) {
Array valuesArr = key_or_array.toArray();
for (ArrayIter iter(valuesArr); iter; ++iter) {
Variant key = iter.first();
if (!key.isString()) {
throw_invalid_argument("apc key: (not a string)");
return Variant(false);
}
Variant v = iter.second();
auto const& strKey = key.toCStrRef();
if (isKeyInvalid(strKey)) {
throw_invalid_argument("apc key: (contains invalid characters)");
return Variant(false);
}
apc_store().set(strKey, v, ttl);
}
return Variant(ArrayData::Create());
}
if (!key_or_array.isString()) {
throw_invalid_argument("apc key: (not a string)");
return Variant(false);
}
String strKey = key_or_array.toString();
if (isKeyInvalid(strKey)) {
throw_invalid_argument("apc key: (contains invalid characters)");
return Variant(false);
}
apc_store().set(strKey, var, ttl);
return Variant(true);
} | Variant HHVM_FUNCTION(apc_store,
const Variant& key_or_array,
const Variant& var ,
int64_t ttl ) {
if (!apcExtension::Enable) return Variant(false);
if (key_or_array.isArray()) {
Array valuesArr = key_or_array.toArray();
for (ArrayIter iter(valuesArr); iter; ++iter) {
Variant key = iter.first();
if (!key.isString()) {
throw_invalid_argument("apc key: (not a string)");
return Variant(false);
}
Variant v = iter.second();
auto const& strKey = key.toCStrRef();
if (isKeyInvalid(strKey)) {
throw_invalid_argument("apc key: (contains invalid characters)");
return Variant(false);
}
apc_store().set(strKey, v, ttl);
}
return Variant(ArrayData::Create());
}
if (!key_or_array.isString()) {
throw_invalid_argument("apc key: (not a string)");
return Variant(false);
}
String strKey = key_or_array.toString();
if (isKeyInvalid(strKey)) {
throw_invalid_argument("apc key: (contains invalid characters)");
return Variant(false);
}
apc_store().set(strKey, var, ttl);
return Variant(true);
} | 1,375 |
0 | av_cold int ff_ivi_decode_close(AVCodecContext *avctx) {
    IVI45DecContext *ctx = avctx->priv_data;
    ivi_free_buffers(&ctx->planes[0]);
    if (ctx->mb_vlc.cust_tab.table)
        ff_free_vlc(&ctx->mb_vlc.cust_tab);
#if IVI4_STREAM_ANALYSER
    if (ctx->is_indeo4) {
        if (ctx->is_scalable)
            av_log(avctx, AV_LOG_ERROR, "This video uses scalability mode!\n");
        if (ctx->uses_tiling)
            av_log(avctx, AV_LOG_ERROR, "This video uses local decoding!\n");
        if (ctx->has_b_frames)
            av_log(avctx, AV_LOG_ERROR, "This video contains B-frames!\n");
        if (ctx->has_transp)
            av_log(avctx, AV_LOG_ERROR, "Transparency mode is enabled!\n");
        if (ctx->uses_haar)
            av_log(avctx, AV_LOG_ERROR, "This video uses Haar transform!\n");
        if (ctx->uses_fullpel)
            av_log(avctx, AV_LOG_ERROR, "This video uses fullpel motion vectors!\n");
    }
#endif
    av_frame_free(&ctx->p_frame);
    return 0;
} | av_cold int ff_ivi_decode_close(AVCodecContext *avctx) {
    IVI45DecContext *ctx = avctx->priv_data;
    ivi_free_buffers(&ctx->planes[0]);
    if (ctx->mb_vlc.cust_tab.table)
        ff_free_vlc(&ctx->mb_vlc.cust_tab);
#if IVI4_STREAM_ANALYSER
    if (ctx->is_indeo4) {
        if (ctx->is_scalable)
            av_log(avctx, AV_LOG_ERROR, "This video uses scalability mode!\n");
        if (ctx->uses_tiling)
            av_log(avctx, AV_LOG_ERROR, "This video uses local decoding!\n");
        if (ctx->has_b_frames)
            av_log(avctx, AV_LOG_ERROR, "This video contains B-frames!\n");
        if (ctx->has_transp)
            av_log(avctx, AV_LOG_ERROR, "Transparency mode is enabled!\n");
        if (ctx->uses_haar)
            av_log(avctx, AV_LOG_ERROR, "This video uses Haar transform!\n");
        if (ctx->uses_fullpel)
            av_log(avctx, AV_LOG_ERROR, "This video uses fullpel motion vectors!\n");
    }
#endif
    av_frame_free(&ctx->p_frame);
    return 0;
} | 1,376
1 | Variant HHVM_FUNCTION(apc_store,
const Variant& key_or_array,
const Variant& var /* = null */,
int64_t ttl /* = 0 */) {
if (!apcExtension::Enable) return Variant(false);
if (key_or_array.isArray()) {
Array valuesArr = key_or_array.toArray();
for (ArrayIter iter(valuesArr); iter; ++iter) {
Variant key = iter.first();
if (!key.isString()) {
throw_invalid_argument("apc key: (not a string)");
return Variant(false);
}
Variant v = iter.second();
apc_store().set(key.toString(), v, ttl);
}
return Variant(ArrayData::Create());
}
if (!key_or_array.isString()) {
throw_invalid_argument("apc key: (not a string)");
return Variant(false);
}
String strKey = key_or_array.toString();
apc_store().set(strKey, var, ttl);
return Variant(true);
} | Variant HHVM_FUNCTION(apc_store,
const Variant& key_or_array,
const Variant& var ,
int64_t ttl ) {
if (!apcExtension::Enable) return Variant(false);
if (key_or_array.isArray()) {
Array valuesArr = key_or_array.toArray();
for (ArrayIter iter(valuesArr); iter; ++iter) {
Variant key = iter.first();
if (!key.isString()) {
throw_invalid_argument("apc key: (not a string)");
return Variant(false);
}
Variant v = iter.second();
apc_store().set(key.toString(), v, ttl);
}
return Variant(ArrayData::Create());
}
if (!key_or_array.isString()) {
throw_invalid_argument("apc key: (not a string)");
return Variant(false);
}
String strKey = key_or_array.toString();
apc_store().set(strKey, var, ttl);
return Variant(true);
} | 1,378 |
1 | long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
/* This is only valid for single tasks */
if (pid <= 0 || tgid <= 0)
return -EINVAL;
/* Not even root can pretend to send signals from the kernel.
Nor can they impersonate a kill(), which adds source info. */
if (info->si_code >= 0)
return -EPERM;
info->si_signo = sig;
return do_send_specific(tgid, pid, sig, info);
} | long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
if (pid <= 0 || tgid <= 0)
return -EINVAL;
if (info->si_code >= 0)
return -EPERM;
info->si_signo = sig;
return do_send_specific(tgid, pid, sig, info);
} | 1,380 |
1 | NTSTATUS set_nt_acl(files_struct *fsp, uint32 security_info_sent, SEC_DESC *psd)
{
connection_struct *conn = fsp->conn;
uid_t user = (uid_t)-1;
gid_t grp = (gid_t)-1;
SMB_STRUCT_STAT sbuf;
DOM_SID file_owner_sid;
DOM_SID file_grp_sid;
canon_ace *file_ace_list = NULL;
canon_ace *dir_ace_list = NULL;
bool acl_perms = False;
mode_t orig_mode = (mode_t)0;
NTSTATUS status;
bool set_acl_as_root = false;
bool acl_set_support = false;
bool ret = false;
DEBUG(10,("set_nt_acl: called for file %s\n", fsp->fsp_name ));
if (!CAN_WRITE(conn)) {
DEBUG(10,("set acl rejected on read-only share\n"));
return NT_STATUS_MEDIA_WRITE_PROTECTED;
}
/*
* Get the current state of the file.
*/
if(fsp->is_directory || fsp->fh->fd == -1) {
if(SMB_VFS_STAT(fsp->conn,fsp->fsp_name, &sbuf) != 0)
return map_nt_error_from_unix(errno);
} else {
if(SMB_VFS_FSTAT(fsp, &sbuf) != 0)
return map_nt_error_from_unix(errno);
}
/* Save the original element we check against. */
orig_mode = sbuf.st_mode;
/*
* Unpack the user/group/world id's.
*/
status = unpack_nt_owners( SNUM(conn), &user, &grp, security_info_sent, psd);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
/*
* Do we need to chown ? If so this must be done first as the incoming
* CREATOR_OWNER acl will be relative to the *new* owner, not the old.
* Noticed by Simo.
*/
if (((user != (uid_t)-1) && (sbuf.st_uid != user)) || (( grp != (gid_t)-1) && (sbuf.st_gid != grp))) {
DEBUG(3,("set_nt_acl: chown %s. uid = %u, gid = %u.\n",
fsp->fsp_name, (unsigned int)user, (unsigned int)grp ));
if(try_chown( fsp->conn, fsp->fsp_name, user, grp) == -1) {
DEBUG(3,("set_nt_acl: chown %s, %u, %u failed. Error = %s.\n",
fsp->fsp_name, (unsigned int)user, (unsigned int)grp, strerror(errno) ));
if (errno == EPERM) {
return NT_STATUS_INVALID_OWNER;
}
return map_nt_error_from_unix(errno);
}
/*
* Recheck the current state of the file, which may have changed.
* (suid/sgid bits, for instance)
*/
if(fsp->is_directory) {
if(SMB_VFS_STAT(fsp->conn, fsp->fsp_name, &sbuf) != 0) {
return map_nt_error_from_unix(errno);
}
} else {
int sret;
if(fsp->fh->fd == -1)
sret = SMB_VFS_STAT(fsp->conn, fsp->fsp_name, &sbuf);
else
sret = SMB_VFS_FSTAT(fsp, &sbuf);
if(sret != 0)
return map_nt_error_from_unix(errno);
}
/* Save the original element we check against. */
orig_mode = sbuf.st_mode;
/* If we successfully chowned, we know we must
* be able to set the acl, so do it as root.
*/
set_acl_as_root = true;
}
create_file_sids(&sbuf, &file_owner_sid, &file_grp_sid);
acl_perms = unpack_canon_ace( fsp, &sbuf, &file_owner_sid, &file_grp_sid,
&file_ace_list, &dir_ace_list, security_info_sent, psd);
/* Ignore W2K traverse DACL set. */
if (!file_ace_list && !dir_ace_list) {
return NT_STATUS_OK;
}
if (!acl_perms) {
DEBUG(3,("set_nt_acl: cannot set permissions\n"));
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return NT_STATUS_ACCESS_DENIED;
}
/*
* Only change security if we got a DACL.
*/
if(!(security_info_sent & DACL_SECURITY_INFORMATION) || (psd->dacl == NULL)) {
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return NT_STATUS_OK;
}
/*
* Try using the POSIX ACL set first. Fall back to chmod if
* we have no ACL support on this filesystem.
*/
if (acl_perms && file_ace_list) {
if (set_acl_as_root) {
become_root();
}
ret = set_canon_ace_list(fsp, file_ace_list, False, sbuf.st_gid, &acl_set_support);
if (set_acl_as_root) {
unbecome_root();
}
if (acl_set_support && ret == false) {
DEBUG(3,("set_nt_acl: failed to set file acl on file %s (%s).\n", fsp->fsp_name, strerror(errno) ));
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return map_nt_error_from_unix(errno);
}
}
if (acl_perms && acl_set_support && fsp->is_directory) {
if (dir_ace_list) {
if (set_acl_as_root) {
become_root();
}
ret = set_canon_ace_list(fsp, dir_ace_list, True, sbuf.st_gid, &acl_set_support);
if (set_acl_as_root) {
unbecome_root();
}
if (ret == false) {
DEBUG(3,("set_nt_acl: failed to set default acl on directory %s (%s).\n", fsp->fsp_name, strerror(errno) ));
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return map_nt_error_from_unix(errno);
}
} else {
int sret = -1;
/*
* No default ACL - delete one if it exists.
*/
if (set_acl_as_root) {
become_root();
}
sret = SMB_VFS_SYS_ACL_DELETE_DEF_FILE(conn, fsp->fsp_name);
if (set_acl_as_root) {
unbecome_root();
}
if (sret == -1) {
if (acl_group_override(conn, sbuf.st_gid, fsp->fsp_name)) {
DEBUG(5,("set_nt_acl: acl group control on and "
"current user in file %s primary group. Override delete_def_acl\n",
fsp->fsp_name ));
become_root();
sret = SMB_VFS_SYS_ACL_DELETE_DEF_FILE(conn, fsp->fsp_name);
unbecome_root();
}
if (sret == -1) {
DEBUG(3,("set_nt_acl: sys_acl_delete_def_file failed (%s)\n", strerror(errno)));
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return map_nt_error_from_unix(errno);
}
}
}
}
if (acl_set_support) {
if (set_acl_as_root) {
become_root();
}
store_inheritance_attributes(fsp, file_ace_list, dir_ace_list,
(psd->type & SE_DESC_DACL_PROTECTED) ? True : False);
if (set_acl_as_root) {
unbecome_root();
}
}
/*
* If we cannot set using POSIX ACLs we fall back to checking if we need to chmod.
*/
if(!acl_set_support && acl_perms) {
mode_t posix_perms;
if (!convert_canon_ace_to_posix_perms( fsp, file_ace_list, &posix_perms)) {
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
DEBUG(3,("set_nt_acl: failed to convert file acl to posix permissions for file %s.\n",
fsp->fsp_name ));
return NT_STATUS_ACCESS_DENIED;
}
if (orig_mode != posix_perms) {
int sret = -1;
DEBUG(3,("set_nt_acl: chmod %s. perms = 0%o.\n",
fsp->fsp_name, (unsigned int)posix_perms ));
if (set_acl_as_root) {
become_root();
}
sret = SMB_VFS_CHMOD(conn,fsp->fsp_name, posix_perms);
if (set_acl_as_root) {
unbecome_root();
}
if(sret == -1) {
if (acl_group_override(conn, sbuf.st_gid, fsp->fsp_name)) {
DEBUG(5,("set_nt_acl: acl group control on and "
"current user in file %s primary group. Override chmod\n",
fsp->fsp_name ));
become_root();
sret = SMB_VFS_CHMOD(conn,fsp->fsp_name, posix_perms);
unbecome_root();
}
if (sret == -1) {
DEBUG(3,("set_nt_acl: chmod %s, 0%o failed. Error = %s.\n",
fsp->fsp_name, (unsigned int)posix_perms, strerror(errno) ));
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return map_nt_error_from_unix(errno);
}
}
}
}
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return NT_STATUS_OK;
} | NTSTATUS set_nt_acl(files_struct *fsp, uint32 security_info_sent, SEC_DESC *psd)
{
connection_struct *conn = fsp->conn;
uid_t user = (uid_t)-1;
gid_t grp = (gid_t)-1;
SMB_STRUCT_STAT sbuf;
DOM_SID file_owner_sid;
DOM_SID file_grp_sid;
canon_ace *file_ace_list = NULL;
canon_ace *dir_ace_list = NULL;
bool acl_perms = False;
mode_t orig_mode = (mode_t)0;
NTSTATUS status;
bool set_acl_as_root = false;
bool acl_set_support = false;
bool ret = false;
DEBUG(10,("set_nt_acl: called for file %s\n", fsp->fsp_name ));
if (!CAN_WRITE(conn)) {
DEBUG(10,("set acl rejected on read-only share\n"));
return NT_STATUS_MEDIA_WRITE_PROTECTED;
}
if(fsp->is_directory || fsp->fh->fd == -1) {
if(SMB_VFS_STAT(fsp->conn,fsp->fsp_name, &sbuf) != 0)
return map_nt_error_from_unix(errno);
} else {
if(SMB_VFS_FSTAT(fsp, &sbuf) != 0)
return map_nt_error_from_unix(errno);
}
orig_mode = sbuf.st_mode;
status = unpack_nt_owners( SNUM(conn), &user, &grp, security_info_sent, psd);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
if (((user != (uid_t)-1) && (sbuf.st_uid != user)) || (( grp != (gid_t)-1) && (sbuf.st_gid != grp))) {
DEBUG(3,("set_nt_acl: chown %s. uid = %u, gid = %u.\n",
fsp->fsp_name, (unsigned int)user, (unsigned int)grp ));
if(try_chown( fsp->conn, fsp->fsp_name, user, grp) == -1) {
DEBUG(3,("set_nt_acl: chown %s, %u, %u failed. Error = %s.\n",
fsp->fsp_name, (unsigned int)user, (unsigned int)grp, strerror(errno) ));
if (errno == EPERM) {
return NT_STATUS_INVALID_OWNER;
}
return map_nt_error_from_unix(errno);
}
if(fsp->is_directory) {
if(SMB_VFS_STAT(fsp->conn, fsp->fsp_name, &sbuf) != 0) {
return map_nt_error_from_unix(errno);
}
} else {
int sret;
if(fsp->fh->fd == -1)
sret = SMB_VFS_STAT(fsp->conn, fsp->fsp_name, &sbuf);
else
sret = SMB_VFS_FSTAT(fsp, &sbuf);
if(sret != 0)
return map_nt_error_from_unix(errno);
}
orig_mode = sbuf.st_mode;
set_acl_as_root = true;
}
create_file_sids(&sbuf, &file_owner_sid, &file_grp_sid);
acl_perms = unpack_canon_ace( fsp, &sbuf, &file_owner_sid, &file_grp_sid,
&file_ace_list, &dir_ace_list, security_info_sent, psd);
if (!file_ace_list && !dir_ace_list) {
return NT_STATUS_OK;
}
if (!acl_perms) {
DEBUG(3,("set_nt_acl: cannot set permissions\n"));
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return NT_STATUS_ACCESS_DENIED;
}
if(!(security_info_sent & DACL_SECURITY_INFORMATION) || (psd->dacl == NULL)) {
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return NT_STATUS_OK;
}
if (acl_perms && file_ace_list) {
if (set_acl_as_root) {
become_root();
}
ret = set_canon_ace_list(fsp, file_ace_list, False, sbuf.st_gid, &acl_set_support);
if (set_acl_as_root) {
unbecome_root();
}
if (acl_set_support && ret == false) {
DEBUG(3,("set_nt_acl: failed to set file acl on file %s (%s).\n", fsp->fsp_name, strerror(errno) ));
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return map_nt_error_from_unix(errno);
}
}
if (acl_perms && acl_set_support && fsp->is_directory) {
if (dir_ace_list) {
if (set_acl_as_root) {
become_root();
}
ret = set_canon_ace_list(fsp, dir_ace_list, True, sbuf.st_gid, &acl_set_support);
if (set_acl_as_root) {
unbecome_root();
}
if (ret == false) {
DEBUG(3,("set_nt_acl: failed to set default acl on directory %s (%s).\n", fsp->fsp_name, strerror(errno) ));
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return map_nt_error_from_unix(errno);
}
} else {
int sret = -1;
if (set_acl_as_root) {
become_root();
}
sret = SMB_VFS_SYS_ACL_DELETE_DEF_FILE(conn, fsp->fsp_name);
if (set_acl_as_root) {
unbecome_root();
}
if (sret == -1) {
if (acl_group_override(conn, sbuf.st_gid, fsp->fsp_name)) {
DEBUG(5,("set_nt_acl: acl group control on and "
"current user in file %s primary group. Override delete_def_acl\n",
fsp->fsp_name ));
become_root();
sret = SMB_VFS_SYS_ACL_DELETE_DEF_FILE(conn, fsp->fsp_name);
unbecome_root();
}
if (sret == -1) {
DEBUG(3,("set_nt_acl: sys_acl_delete_def_file failed (%s)\n", strerror(errno)));
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return map_nt_error_from_unix(errno);
}
}
}
}
if (acl_set_support) {
if (set_acl_as_root) {
become_root();
}
store_inheritance_attributes(fsp, file_ace_list, dir_ace_list,
(psd->type & SE_DESC_DACL_PROTECTED) ? True : False);
if (set_acl_as_root) {
unbecome_root();
}
}
if(!acl_set_support && acl_perms) {
mode_t posix_perms;
if (!convert_canon_ace_to_posix_perms( fsp, file_ace_list, &posix_perms)) {
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
DEBUG(3,("set_nt_acl: failed to convert file acl to posix permissions for file %s.\n",
fsp->fsp_name ));
return NT_STATUS_ACCESS_DENIED;
}
if (orig_mode != posix_perms) {
int sret = -1;
DEBUG(3,("set_nt_acl: chmod %s. perms = 0%o.\n",
fsp->fsp_name, (unsigned int)posix_perms ));
if (set_acl_as_root) {
become_root();
}
sret = SMB_VFS_CHMOD(conn,fsp->fsp_name, posix_perms);
if (set_acl_as_root) {
unbecome_root();
}
if(sret == -1) {
if (acl_group_override(conn, sbuf.st_gid, fsp->fsp_name)) {
DEBUG(5,("set_nt_acl: acl group control on and "
"current user in file %s primary group. Override chmod\n",
fsp->fsp_name ));
become_root();
sret = SMB_VFS_CHMOD(conn,fsp->fsp_name, posix_perms);
unbecome_root();
}
if (sret == -1) {
DEBUG(3,("set_nt_acl: chmod %s, 0%o failed. Error = %s.\n",
fsp->fsp_name, (unsigned int)posix_perms, strerror(errno) ));
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return map_nt_error_from_unix(errno);
}
}
}
}
free_canon_ace_list(file_ace_list);
free_canon_ace_list(dir_ace_list);
return NT_STATUS_OK;
} | 1,381 |
0 | char *get_constraint_name(Oid conoid) {
    HeapTuple tp;
    tp = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conoid));
    if (HeapTupleIsValid(tp)) {
        Form_pg_constraint contup = (Form_pg_constraint) GETSTRUCT(tp);
        char *result;
        result = pstrdup(NameStr(contup->conname));
        ReleaseSysCache(tp);
        return result;
    }
    else
        return NULL;
} | char *get_constraint_name(Oid conoid) {
    HeapTuple tp;
    tp = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conoid));
    if (HeapTupleIsValid(tp)) {
        Form_pg_constraint contup = (Form_pg_constraint) GETSTRUCT(tp);
        char *result;
        result = pstrdup(NameStr(contup->conname));
        ReleaseSysCache(tp);
        return result;
    }
    else
        return NULL;
} | 1,382
1 | static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = __tun_get(tfile);
struct sock *sk = tun->sk;
unsigned int mask = 0;
if (!tun)
return POLLERR;
DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
poll_wait(file, &tun->socket.wait, wait);
if (!skb_queue_empty(&tun->readq))
mask |= POLLIN | POLLRDNORM;
if (sock_writeable(sk) ||
(!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
sock_writeable(sk)))
mask |= POLLOUT | POLLWRNORM;
if (tun->dev->reg_state != NETREG_REGISTERED)
mask = POLLERR;
tun_put(tun);
return mask;
} | static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = __tun_get(tfile);
struct sock *sk = tun->sk;
unsigned int mask = 0;
if (!tun)
return POLLERR;
DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
poll_wait(file, &tun->socket.wait, wait);
if (!skb_queue_empty(&tun->readq))
mask |= POLLIN | POLLRDNORM;
if (sock_writeable(sk) ||
(!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
sock_writeable(sk)))
mask |= POLLOUT | POLLWRNORM;
if (tun->dev->reg_state != NETREG_REGISTERED)
mask = POLLERR;
tun_put(tun);
return mask;
} | 1,383 |
1 | static int xen_host_pci_config_open(XenHostPCIDevice *d) {
    char path[PATH_MAX];
    int rc;
    rc = xen_host_pci_sysfs_path(d, "config", path, sizeof (path));
    if (rc) {
        return rc;
    }
    d->config_fd = open(path, O_RDWR);
    if (d->config_fd < 0) {
        return -errno;
    }
    return 0;
} | static int xen_host_pci_config_open(XenHostPCIDevice *d) {
    char path[PATH_MAX];
    int rc;
    rc = xen_host_pci_sysfs_path(d, "config", path, sizeof (path));
    if (rc) {
        return rc;
    }
    d->config_fd = open(path, O_RDWR);
    if (d->config_fd < 0) {
        return -errno;
    }
    return 0;
} | 1,384
0 | long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
/* This is only valid for single tasks */
if (pid <= 0 || tgid <= 0)
return -EINVAL;
/* Not even root can pretend to send signals from the kernel.
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
if (info->si_code != SI_QUEUE) {
/* We used to allow any < 0 si_code */
WARN_ON_ONCE(info->si_code < 0);
return -EPERM;
}
info->si_signo = sig;
return do_send_specific(tgid, pid, sig, info);
} | long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
if (pid <= 0 || tgid <= 0)
return -EINVAL;
if (info->si_code != SI_QUEUE) {
WARN_ON_ONCE(info->si_code < 0);
return -EPERM;
}
info->si_signo = sig;
return do_send_specific(tgid, pid, sig, info);
} | 1,386 |
0 | void proto_register_dns ( void ) {
static hf_register_info hf [ ] = {
{
& hf_dns_length , {
"Length" , "dns.length" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Length of DNS-over-TCP request or response" , HFILL }
}
, {
& hf_dns_flags , {
"Flags" , "dns.flags" , FT_UINT16 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_flags_response , {
"Response" , "dns.flags.response" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_response ) , F_RESPONSE , "Is the message a response?" , HFILL }
}
, {
& hf_dns_flags_opcode , {
"Opcode" , "dns.flags.opcode" , FT_UINT16 , BASE_DEC , VALS ( opcode_vals ) , F_OPCODE , "Operation code" , HFILL }
}
, {
& hf_dns_flags_authoritative , {
"Authoritative" , "dns.flags.authoritative" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_authoritative ) , F_AUTHORITATIVE , "Is the server is an authority for the domain?" , HFILL }
}
, {
& hf_dns_flags_conflict_query , {
"Conflict" , "dns.flags.conflict" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_conflict_query ) , F_CONFLICT , "Did we receive multiple responses to a query?" , HFILL }
}
, {
& hf_dns_flags_conflict_response , {
"Conflict" , "dns.flags.conflict" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_conflict_response ) , F_CONFLICT , "Is the name considered unique?" , HFILL }
}
, {
& hf_dns_flags_truncated , {
"Truncated" , "dns.flags.truncated" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_truncated ) , F_TRUNCATED , "Is the message truncated?" , HFILL }
}
, {
& hf_dns_flags_recdesired , {
"Recursion desired" , "dns.flags.recdesired" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_recdesired ) , F_RECDESIRED , "Do query recursively?" , HFILL }
}
, {
& hf_dns_flags_tentative , {
"Tentative" , "dns.flags.tentative" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_tentative ) , F_TENTATIVE , "Is the responder authoritative for the name, but not yet verified the uniqueness?" , HFILL }
}
, {
& hf_dns_flags_recavail , {
"Recursion available" , "dns.flags.recavail" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_recavail ) , F_RECAVAIL , "Can the server do recursive queries?" , HFILL }
}
, {
& hf_dns_flags_z , {
"Z" , "dns.flags.z" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_z ) , F_Z , "Z flag" , HFILL }
}
, {
& hf_dns_flags_authenticated , {
"Answer authenticated" , "dns.flags.authenticated" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_authenticated ) , F_AUTHENTIC , "Was the reply data authenticated by the server?" , HFILL }
}
, {
& hf_dns_flags_ad , {
"AD bit" , "dns.flags.authenticated" , FT_BOOLEAN , 16 , TFS ( & tfs_set_notset ) , F_AUTHENTIC , NULL , HFILL }
}
, {
& hf_dns_flags_checkdisable , {
"Non-authenticated data" , "dns.flags.checkdisable" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_checkdisable ) , F_CHECKDISABLE , "Is non-authenticated data acceptable?" , HFILL }
}
, {
& hf_dns_flags_rcode , {
"Reply code" , "dns.flags.rcode" , FT_UINT16 , BASE_DEC , VALS ( rcode_vals ) , F_RCODE , NULL , HFILL }
}
, {
& hf_dns_transaction_id , {
"Transaction ID" , "dns.id" , FT_UINT16 , BASE_HEX , NULL , 0x0 , "Identification of transaction" , HFILL }
}
, {
& hf_dns_qry_type , {
"Type" , "dns.qry.type" , FT_UINT16 , BASE_DEC | BASE_EXT_STRING , & dns_types_description_vals_ext , 0 , "Query Type" , HFILL }
}
, {
& hf_dns_qry_class , {
"Class" , "dns.qry.class" , FT_UINT16 , BASE_HEX , VALS ( dns_classes ) , 0x0 , "Query Class" , HFILL }
}
, {
& hf_dns_qry_class_mdns , {
"Class" , "dns.qry.class" , FT_UINT16 , BASE_HEX , VALS ( dns_classes ) , 0x7FFF , "Query Class" , HFILL }
}
, {
& hf_dns_qry_qu , {
"\"QU\" question" , "dns.qry.qu" , FT_BOOLEAN , 16 , NULL , C_QU , "QU flag" , HFILL }
}
, {
& hf_dns_qry_name , {
"Name" , "dns.qry.name" , FT_STRING , BASE_NONE , NULL , 0x0 , "Query Name" , HFILL }
}
, {
& hf_dns_qry_name_len , {
"Name Length" , "dns.qry.name.len" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Query Name Len" , HFILL }
}
, {
& hf_dns_count_labels , {
"Label Count" , "dns.count.labels" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Query Label Count" , HFILL }
}
, {
& hf_dns_rr_type , {
"Type" , "dns.resp.type" , FT_UINT16 , BASE_DEC | BASE_EXT_STRING , & dns_types_description_vals_ext , 0x0 , "Response Type" , HFILL }
}
, {
& hf_dns_rr_class , {
"Class" , "dns.resp.class" , FT_UINT16 , BASE_HEX , VALS ( dns_classes ) , 0x0 , "Response Class" , HFILL }
}
, {
& hf_dns_rr_class_mdns , {
"Class" , "dns.resp.class" , FT_UINT16 , BASE_HEX , VALS ( dns_classes ) , 0x7FFF , "Response Class" , HFILL }
}
, {
& hf_dns_rr_cache_flush , {
"Cache flush" , "dns.resp.cache_flush" , FT_BOOLEAN , 16 , NULL , C_FLUSH , "Cache flush flag" , HFILL }
}
, {
& hf_dns_rr_ext_rcode , {
"Higher bits in extended RCODE" , "dns.resp.ext_rcode" , FT_UINT8 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_rr_edns0_version , {
"EDNS0 version" , "dns.resp.edns0_version" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_rr_z , {
"Z" , "dns.resp.z" , FT_UINT16 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_rr_z_do , {
"DO bit" , "dns.resp.z.do" , FT_BOOLEAN , 16 , TFS ( & tfs_dns_rr_z_do ) , 0x8000 , "DNSSEC OK" , HFILL }
}
, {
& hf_dns_rr_z_reserved , {
"Reserved" , "dns.resp.z.reserved" , FT_UINT16 , BASE_HEX , NULL , 0x7FFF , NULL , HFILL }
}
, {
& hf_dns_srv_service , {
"Service" , "dns.srv.service" , FT_STRING , BASE_NONE , NULL , 0x0 , "Desired service" , HFILL }
}
, {
& hf_dns_srv_proto , {
"Protocol" , "dns.srv.proto" , FT_STRING , BASE_NONE , NULL , 0x0 , "Desired protocol" , HFILL }
}
, {
& hf_dns_srv_name , {
"Name" , "dns.srv.name" , FT_STRING , BASE_NONE , NULL , 0x0 , "Domain this resource record refers to" , HFILL }
}
, {
& hf_dns_srv_priority , {
"Priority" , "dns.srv.priority" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_srv_weight , {
"Weight" , "dns.srv.weight" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_srv_port , {
"Port" , "dns.srv.port" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_srv_target , {
"Target" , "dns.srv.target" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_order , {
"Order" , "dns.naptr.order" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_preference , {
"Preference" , "dns.naptr.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_flags_length , {
"Flags Length" , "dns.naptr.flags_length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_flags , {
"Flags" , "dns.naptr.flags" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_service_length , {
"Service Length" , "dns.naptr.service_length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_service , {
"Service" , "dns.naptr.service" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_regex_length , {
"Regex Length" , "dns.naptr.regex_length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_regex , {
"Regex" , "dns.naptr.regex" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_replacement_length , {
"Replacement Length" , "dns.naptr.replacement_length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_replacement , {
"Replacement" , "dns.naptr.replacement" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_rr_name , {
"Name" , "dns.resp.name" , FT_STRING , BASE_NONE , NULL , 0x0 , "Response Name" , HFILL }
}
, {
& hf_dns_rr_ttl , {
"Time to live" , "dns.resp.ttl" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "Response TTL" , HFILL }
}
, {
& hf_dns_rr_len , {
"Data length" , "dns.resp.len" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "Response Length" , HFILL }
}
, {
& hf_dns_a , {
"Address" , "dns.a" , FT_IPv4 , BASE_NONE , NULL , 0x0 , "Response Address" , HFILL }
}
, {
& hf_dns_md , {
"Mail Destination" , "dns.md" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_mf , {
"Mail Forwarder" , "dns.mf" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_mb , {
"MailBox Domaine" , "dns.mb" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_mg , {
"Mail Group member" , "dns.mg" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_mr , {
"Mail Rename domaine" , "dns.mr" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_null , {
"Null (data)" , "dns.null" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_aaaa , {
"AAAA Address" , "dns.aaaa" , FT_IPv6 , BASE_NONE , NULL , 0x0 , "AAAA Response Address" , HFILL }
}
, {
& hf_dns_cname , {
"CNAME" , "dns.cname" , FT_STRING , BASE_NONE , NULL , 0x0 , "Response Primary Name" , HFILL }
}
, {
& hf_dns_rr_udp_payload_size_mdns , {
"UDP payload size" , "dns.rr.udp_payload_size" , FT_UINT16 , BASE_HEX , NULL , 0x7FFF , NULL , HFILL }
}
, {
& hf_dns_rr_udp_payload_size , {
"UDP payload size" , "dns.rr.udp_payload_size" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_mname , {
"Primary name server" , "dns.soa.mname" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_rname , {
"Responsible authority's mailbox" , "dns.soa.rname" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_serial_number , {
"Serial Number" , "dns.soa.serial_number" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_refresh_interval , {
"Refresh Interval" , "dns.soa.refresh_interval" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_retry_interval , {
"Retry Interval" , "dns.soa.retry_interval" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_expire_limit , {
"Expire limit" , "dns.soa.expire_limit" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_minimum_ttl , {
"Minimum TTL" , "dns.soa.mininum_ttl" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ptr_domain_name , {
"Domain Name" , "dns.ptr.domain_name" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_wks_address , {
"Address" , "dns.wks.address" , FT_IPv4 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_wks_protocol , {
"Protocol" , "dns.wks.protocol" , FT_UINT8 , BASE_DEC | BASE_EXT_STRING , & ipproto_val_ext , 0x0 , NULL , HFILL }
}
, {
& hf_dns_wks_bits , {
"Bits" , "dns.wks.bits" , FT_UINT8 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_hinfo_cpu_length , {
"CPU Length" , "dns.hinfo.cpu_length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_hinfo_cpu , {
"CPU" , "dns.hinfo.cpu" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_hinfo_os_length , {
"OS Length" , "dns.hinfo.os_length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_hinfo_os , {
"OS" , "dns.hinfo.os" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_minfo_r_mailbox , {
"Responsible Mailbox" , "dns.minfo.r" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_minfo_e_mailbox , {
"Error Mailbox" , "dns.minfo.e" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_mx_preference , {
"Preference" , "dns.mx.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_mx_mail_exchange , {
"Mail Exchange" , "dns.mx.mail_exchange" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_txt_length , {
"TXT Length" , "dns.txt.length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_txt , {
"TXT" , "dns.txt" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_openpgpkey , {
"OpenPGP Key" , "dns.openpgpkey" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_spf_length , {
"SPF Length" , "dns.spf.length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_spf , {
"SPF" , "dns.spf" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_nodeid_preference , {
"Preference" , "dns.ilnp.nid.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_nodeid , {
"NodeID" , "dns.ilnp.nid" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_locator32_preference , {
"Preference" , "dns.ilnp.l32.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_locator32 , {
"Locator32" , "dns.ilnp.l32" , FT_IPv4 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_locator64_preference , {
"Preference" , "dns.ilnp.l64.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_locator64 , {
"Locator64" , "dns.ilnp.l64" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_locatorfqdn_preference , {
"Preference" , "dns.ilnp.lp.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_locatorfqdn , {
"Locator FQDN" , "dns.ilnp.lp" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_eui48 , {
"EUI48 Address" , "dns.eui48" , FT_ETHER , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_eui64 , {
"EUI64 Address" , "dns.eui64" , FT_EUI64 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_rrsig_type_covered , {
"Type Covered" , "dns.rrsig.type_covered" , FT_UINT16 , BASE_DEC | BASE_EXT_STRING , & dns_types_description_vals_ext , 0x0 , "Identifies the type of the RRset that is covered by this RRSIG record" , HFILL }
}
, {
& hf_dns_rrsig_algorithm , {
"Algorithm" , "dns.rrsig.algorithm" , FT_UINT8 , BASE_DEC , VALS ( dnssec_algo_vals ) , 0x0 , "Identifies the cryptographic algorithm used to create the signature" , HFILL }
}
, {
& hf_dns_rrsig_labels , {
"Labels" , "dns.rrsig.labels" , FT_UINT8 , BASE_DEC , NULL , 0x0 , "Specifies the number of labels in the original RRSIG RR owner name" , HFILL }
}
, {
& hf_dns_rrsig_original_ttl , {
"Original TTL" , "dns.rrsig.original_ttl" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "Specifies the TTL of the covered RRset as it appears in the authoritative zone" , HFILL }
}
, {
& hf_dns_rrsig_signature_expiration , {
"Signature Expiration" , "dns.rrsig.signature_expiration" , FT_ABSOLUTE_TIME , ABSOLUTE_TIME_LOCAL , NULL , 0x0 , "Specify a validity period for the signature" , HFILL }
}
, {
& hf_dns_rrsig_signature_inception , {
"Signature Inception" , "dns.rrsig.signature_inception" , FT_ABSOLUTE_TIME , ABSOLUTE_TIME_LOCAL , NULL , 0x0 , "Specify a validity period for the signature" , HFILL }
}
, {
& hf_dns_rrsig_key_tag , {
"Key Tag" , "dns.rrsig.key_tag" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Contains the key tag value of the DNSKEY RR that validates this signature" , HFILL }
}
, {
& hf_dns_rrsig_signers_name , {
"Signer's name" , "dns.rrsig.signers_name" , FT_STRING , BASE_NONE , NULL , 0x0 , "Identifies the owner name of the DNSKEY RR that a validator is supposed to use to validate this signature" , HFILL }
}
, {
& hf_dns_rrsig_signature , {
"Signature" , "dns.rrsig.signature" , FT_BYTES , BASE_NONE , NULL , 0x0 , "Contains the cryptographic signature that covers the RRSIG RDATA" , HFILL }
}
, {
& hf_dns_dnskey_flags , {
"Flags" , "dns.dnskey.flags" , FT_UINT16 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_dnskey_flags_zone_key , {
"Zone Key" , "dns.dnskey.flags.zone_key" , FT_BOOLEAN , 16 , TFS ( & dns_dnskey_zone_key_tfs ) , DNSKEY_FLAGS_ZK , NULL , HFILL }
}
, {
& hf_dns_dnskey_flags_key_revoked , {
"Key Revoked" , "dns.dnskey.flags.key_revoked" , FT_BOOLEAN , 16 , TFS ( & tfs_yes_no ) , DNSKEY_FLAGS_KR , NULL , HFILL }
}
, {
& hf_dns_dnskey_flags_secure_entry_point , {
"Key Signing Key" , "dns.dnskey.flags.secure_entry_point" , FT_BOOLEAN , 16 , TFS ( & tfs_yes_no ) , DNSKEY_FLAGS_SEP , NULL , HFILL }
}
, {
& hf_dns_dnskey_flags_reserved , {
"Key Signing Key" , "dns.dnskey.flags.reserved" , FT_UINT16 , BASE_HEX , NULL , DNSKEY_FLAGS_RSV , "Must be zero" , HFILL }
}
, {
& hf_dns_dnskey_protocol , {
"Protocol" , "dns.dnskey.protocol" , FT_UINT8 , BASE_DEC , NULL , 0x0 , "Must be 3" , HFILL }
}
, {
& hf_dns_dnskey_algorithm , {
"Algorithm" , "dns.dnskey.algorithm" , FT_UINT8 , BASE_DEC , VALS ( dnssec_algo_vals ) , 0x0 , "Identifies the public key's cryptographic algorithm and determines the format of the Public Key field" , HFILL }
}
, {
& hf_dns_dnskey_key_id , {
"Key id" , "dns.dnskey.key_id" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_dnskey_public_key , {
"Public Key" , "dns.dnskey.public_key" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_key_flags , {
"Flags" , "dns.key.flags" , FT_UINT16 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_key_flags_authentication , {
"Key allowed for authentication" , "dns.key.flags.authentication" , FT_BOOLEAN , 16 , TFS ( & tfs_not_allowed_allowed ) , 0x8000 , NULL , HFILL }
}
, {
& hf_dns_key_flags_confidentiality , {
"Key allowed for confidentiality" , "dns.key.flags.confidentiality" , FT_BOOLEAN , 16 , TFS ( & tfs_not_allowed_allowed ) , 0x4000 , NULL , HFILL }
}
, {
& hf_dns_key_flags_key_required , {
"Key required" , "dns.key.flags.required" , FT_BOOLEAN , 16 , TFS ( & tfs_required_experimental ) , 0x2000 , NULL , HFILL }
}
, {
& hf_dns_key_flags_associated_user , {
"Key is associated with a user" , "dns.key.flags.associated_user" , FT_BOOLEAN , 16 , TFS ( & tfs_yes_no ) , 0x0400 , NULL , HFILL }
}
, {
& hf_dns_key_flags_associated_named_entity , {
"Key is associated with the named entity" , "dns.key.flags.associated_named_entity" , FT_BOOLEAN , 16 , TFS ( & tfs_yes_no ) , 0x0200 , NULL , HFILL }
}
, {
& hf_dns_key_flags_ipsec , {
"Key use with IPSEC" , "dns.key.flags.ipsec" , FT_BOOLEAN , 16 , TFS ( & tfs_valid_invalid ) , 0x0080 , NULL , HFILL }
}
, {
& hf_dns_key_flags_mime , {
"Key use with MIME security multiparts" , "dns.key.flags.mime" , FT_BOOLEAN , 16 , TFS ( & tfs_valid_invalid ) , 0x0040 , NULL , HFILL }
}
, {
& hf_dns_key_flags_signatory , {
"Signatory" , "dns.key.flags.signatory" , FT_UINT16 , BASE_DEC , NULL , 0x000F , NULL , HFILL }
}
, {
& hf_dns_key_protocol , {
"Protocol" , "dns.key.protocol" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_key_algorithm , {
"Algorithm" , "dns.key.algorithm" , FT_UINT8 , BASE_DEC , VALS ( dnssec_algo_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_key_key_id , {
"Key ID" , "dns.key.key_id" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_key_public_key , {
"Public Key" , "dns.key.public_key" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_px_preference , {
"Preference" , "dns.px.preference" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_px_map822 , {
"MAP822" , "dns.px.map822" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_px_mapx400 , {
"MAPX400" , "dns.px.map400" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_algo_name , {
"Algorithm name" , "dns.tkey.algo_name" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_signature_expiration , {
"Signature Expiration" , "dns.tkey.signature_expiration" , FT_ABSOLUTE_TIME , ABSOLUTE_TIME_LOCAL , NULL , 0x0 , "Specify a validity period for the signature" , HFILL }
}
, {
& hf_dns_tkey_signature_inception , {
"Signature Inception" , "dns.tkey.signature_inception" , FT_ABSOLUTE_TIME , ABSOLUTE_TIME_LOCAL , NULL , 0x0 , "Specify a validity period for the signature" , HFILL }
}
, {
& hf_dns_tkey_mode , {
"Mode" , "dns.tkey.mode" , FT_UINT16 , BASE_DEC , VALS ( tkey_mode_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_error , {
"Error" , "dns.tkey.error" , FT_UINT16 , BASE_DEC , VALS ( rcode_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_key_size , {
"Key Size" , "dns.tkey.key_size" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_key_data , {
"Key Data" , "dns.tkey.key_data" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_other_size , {
"Other Size" , "dns.tkey.other_size" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_other_data , {
"Other Data" , "dns.tkey.other_data" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_gateway_precedence , {
"Gateway Precedence" , "dns.ipseckey.gateway_precedence" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_gateway_algorithm , {
"Gateway Algorithm" , "dns.ipseckey.gateway_algorithm" , FT_UINT8 , BASE_DEC , VALS ( gw_algo_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_gateway_type , {
"Gateway Type" , "dns.ipseckey.gateway_type" , FT_UINT8 , BASE_DEC , VALS ( gw_type_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_gateway_ipv4 , {
"IPv4 Gateway" , "dns.ipseckey.gateway_ipv4" , FT_IPv4 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_gateway_ipv6 , {
"IPv6 Gateway" , "dns.ipseckey.gateway_ipv6" , FT_IPv6 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_gateway_dns , {
"DNS Gateway" , "dns.ipseckey.gateway_dns" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_public_key , {
"Public Key" , "dns.ipseckey.public_key" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_a6_prefix_len , {
"Prefix len" , "dns.a6.prefix_len" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_a6_address_suffix , {
"Address Suffix" , "dns.a6.address_suffix" , FT_IPv6 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_a6_prefix_name , {
"Prefix name" , "dns.a6.prefix_name" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_dname , {
"Dname" , "dns.dname" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_version , {
"Version" , "dns.loc.version" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_size , {
"Size" , "dns.loc.size" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_horizontal_precision , {
"Horizontal Precision" , "dns.loc.horizontal_precision" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_vertical_precision , {
"Vertial Precision" , "dns.loc.vertial_precision" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_latitude , {
"Latitude" , "dns.loc.latitude" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_longitude , {
"Longitude" , "dns.loc.longitude" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_altitude , {
"Altitude" , "dns.loc.altitude" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_unknown_data , {
"Unknown data" , "dns.loc.unknown_data" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_nxt_next_domain_name , {
"Next Domain Name" , "dns.nxt.next_domain_name" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_kx_preference , {
"Preference" , "dns.kx.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_kx_key_exchange , {
"Key Exchange" , "dns.kx.key_exchange" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_cert_type , {
"Type" , "dns.cert.type" , FT_UINT16 , BASE_DEC , VALS ( dns_cert_type_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_cert_key_tag , {
"Key Tag" , "dns.cert.key_tag" , FT_UINT16 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_cert_algorithm , {
"Algorithm" , "dns.cert.algorithm" , FT_UINT8 , BASE_DEC , VALS ( dnssec_algo_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_cert_certificate , {
"Certificate (or CRL)" , "dns.cert.certificate" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_nsec_next_domain_name , {
"Next Domain Name" , "dns.nsec.next_domain_name" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ns , {
"Name Server" , "dns.ns" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt , {
"Option" , "dns.opt" , FT_NONE , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_code , {
"Option Code" , "dns.opt.code" , FT_UINT16 , BASE_DEC , VALS ( edns0_opt_code_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_len , {
"Option Length" , "dns.opt.len" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_data , {
"Option Data" , "dns.opt.data" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_dau , {
"DAU" , "dns.opt.dau" , FT_UINT8 , BASE_DEC , VALS ( dnssec_algo_vals ) , 0x0 , "DNSSEC Algorithm Understood" , HFILL }
}
, {
& hf_dns_opt_dhu , {
"DHU" , "dns.opt.dhu" , FT_UINT8 , BASE_DEC , VALS ( dns_ds_digest_vals ) , 0x0 , "DS Hash Understood" , HFILL }
}
, {
& hf_dns_opt_n3u , {
"N3U" , "dns.opt.n3u" , FT_UINT8 , BASE_DEC , VALS ( hash_algorithms ) , 0x0 , "NSEC3 Hash Understood" , HFILL }
}
, {
& hf_dns_opt_client_family , {
"Family" , "dns.opt.client.family" , FT_UINT16 , BASE_DEC , VALS ( afamily_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_client_netmask , {
"Source Netmask" , "dns.opt.client.netmask" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_client_scope , {
"Scope Netmask" , "dns.opt.client.scope" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_client_addr , {
"Client Subnet" , "dns.opt.client.addr" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_client_addr4 , {
"Client Subnet" , "dns.opt.client.addr4" , FT_IPv4 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_client_addr6 , {
"Client Subnet" , "dns.opt.client.addr6" , FT_IPv6 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_count_questions , {
"Questions" , "dns.count.queries" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of queries in packet" , HFILL }
}
, {
& hf_dns_count_zones , {
"Zones" , "dns.count.zones" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of zones in packet" , HFILL }
}
, {
& hf_dns_count_answers , {
"Answer RRs" , "dns.count.answers" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of answers in packet" , HFILL }
}
, {
& hf_dns_count_prerequisites , {
"Prerequisites" , "dns.count.prerequisites" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of prerequisites in packet" , HFILL }
}
, {
& hf_dns_count_auth_rr , {
"Authority RRs" , "dns.count.auth_rr" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of authoritative records in packet" , HFILL }
}
, {
& hf_dns_count_updates , {
"Updates" , "dns.count.updates" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of updates records in packet" , HFILL }
}
, {
& hf_dns_nsec3_algo , {
"Hash algorithm" , "dns.nsec3.algo" , FT_UINT8 , BASE_DEC , VALS ( hash_algorithms ) , 0 , NULL , HFILL }
}
, {
& hf_dns_nsec3_flags , {
"NSEC3 flags" , "dns.nsec3.flags" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_nsec3_flag_optout , {
"NSEC3 Opt-out flag" , "dns.nsec3.flags.opt_out" , FT_BOOLEAN , 8 , TFS ( & tfs_flags_nsec3_optout ) , NSEC3_FLAG_OPTOUT , NULL , HFILL }
}
, {
& hf_dns_nsec3_iterations , {
"NSEC3 iterations" , "dns.nsec3.iterations" , FT_UINT16 , BASE_DEC , NULL , 0 , "Number of hashing iterations" , HFILL }
}
, {
& hf_dns_nsec3_salt_length , {
"Salt length" , "dns.nsec3.salt_length" , FT_UINT8 , BASE_DEC , NULL , 0 , "Length of salt in bytes" , HFILL }
}
, {
& hf_dns_nsec3_salt_value , {
"Salt value" , "dns.nsec3.salt_value" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_nsec3_hash_length , {
"Hash length" , "dns.nsec3.hash_length" , FT_UINT8 , BASE_DEC , NULL , 0 , "Length in bytes of next hashed owner" , HFILL }
}
, {
& hf_dns_nsec3_hash_value , {
"Next hashed owner" , "dns.nsec3.hash_value" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_tlsa_certificate_usage , {
"Certificate Usage" , "dns.tlsa.certificate_usage" , FT_UINT8 , BASE_DEC , VALS ( tlsa_certificate_usage_vals ) , 0 , "Specifies the provided association that will be used to match the certificate presented in the TLS handshake" , HFILL }
}
, {
& hf_dns_tlsa_selector , {
"Selector" , "dns.tlsa.selector" , FT_UINT8 , BASE_DEC , VALS ( tlsa_selector_vals ) , 0 , "Specifies which part of the TLS certificate presented by the server will be matched against the association data" , HFILL }
}
, {
& hf_dns_tlsa_matching_type , {
"Matching Type" , "dns.tlsa.matching_type" , FT_UINT8 , BASE_DEC , VALS ( tlsa_matching_type_vals ) , 0 , "Specifies how the certificate association is presented" , HFILL }
}
, {
& hf_dns_tlsa_certificate_association_data , {
"Certificate Association Data" , "dns.tlsa.certificate_association_data" , FT_BYTES , BASE_NONE , NULL , 0 , "The data refers to the certificate in the association" , HFILL }
}
, {
& hf_dns_tsig_algorithm_name , {
"Algorithm Name" , "dns.tsig.algorithm_name" , FT_STRING , BASE_NONE , NULL , 0x0 , "Name of algorithm used for the MAC" , HFILL }
}
, {
& hf_dns_tsig_time_signed , {
"Time Signed" , "dns.tsig.time_signed" , FT_ABSOLUTE_TIME , ABSOLUTE_TIME_LOCAL , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tsig_original_id , {
"Original Id" , "dns.tsig.original_id" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tsig_error , {
"Error" , "dns.tsig.error" , FT_UINT16 , BASE_DEC , VALS ( rcode_vals ) , 0x0 , "Expanded RCODE for TSIG" , HFILL }
}
, {
& hf_dns_tsig_fudge , {
"Fudge" , "dns.tsig.fudge" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of bytes for the MAC" , HFILL }
}
, {
& hf_dns_tsig_mac_size , {
"MAC Size" , "dns.tsig.mac_size" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of bytes for the MAC" , HFILL }
}
, {
& hf_dns_tsig_other_len , {
"Other Len" , "dns.tsig.other_len" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of bytes for Other Data" , HFILL }
}
, {
& hf_dns_tsig_mac , {
"MAC" , "dns.tsig.mac" , FT_NONE , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tsig_other_data , {
"Other Data" , "dns.tsig.other_data" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_response_in , {
"Response In" , "dns.response_in" , FT_FRAMENUM , BASE_NONE , NULL , 0x0 , "The response to this DNS query is in this frame" , HFILL }
}
, {
& hf_dns_response_to , {
"Request In" , "dns.response_to" , FT_FRAMENUM , BASE_NONE , NULL , 0x0 , "This is a response to the DNS query in this frame" , HFILL }
}
, {
& hf_dns_time , {
"Time" , "dns.time" , FT_RELATIVE_TIME , BASE_NONE , NULL , 0x0 , "The time between the Query and the Response" , HFILL }
}
, {
& hf_dns_count_add_rr , {
"Additional RRs" , "dns.count.add_rr" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of additional records in packet" , HFILL }
}
, {
& hf_dns_sshfp_algorithm , {
"Algorithm" , "dns.sshfp.algorithm" , FT_UINT8 , BASE_DEC , VALS ( sshfp_algo_vals ) , 0 , NULL , HFILL }
}
, {
& hf_dns_sshfp_fingerprint_type , {
"Fingerprint type" , "dns.sshfp.fingerprint.type" , FT_UINT8 , BASE_DEC , VALS ( sshfp_fingertype_vals ) , 0 , NULL , HFILL }
}
, {
& hf_dns_sshfp_fingerprint , {
"Fingerprint" , "dns.sshfp.fingerprint" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_hip_hit_length , {
"HIT length" , "dns.hip.hit.length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_hip_pk_algo , {
"HIT length" , "dns.hip.hit.pk.algo" , FT_UINT8 , BASE_DEC , VALS ( hip_algo_vals ) , 0 , NULL , HFILL }
}
, {
& hf_dns_hip_pk_length , {
"PK length" , "dns.hip.pk.length" , FT_UINT16 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_hip_hit , {
"Host Identity Tag" , "dns.hip.hit" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_hip_pk , {
"HIP Public Key" , "dns.hip.pk" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_hip_rendezvous_server , {
"Rendezvous Server" , "dns.hip.rendezvous_server" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_dhcid_rdata , {
"DHCID Data" , "dns.dhcid.rdata" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_ds_key_id , {
"Key id" , "dns.ds.key_id" , FT_UINT16 , BASE_HEX , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_ds_algorithm , {
"Algorithm" , "dns.ds.algorithm" , FT_UINT8 , BASE_DEC , VALS ( dnssec_algo_vals ) , 0 , NULL , HFILL }
}
, {
& hf_dns_ds_digest_type , {
"Digest Type" , "dns.ds.digest_type" , FT_UINT8 , BASE_DEC , VALS ( dns_ds_digest_vals ) , 0 , NULL , HFILL }
}
, {
& hf_dns_ds_digest , {
"Digest" , "dns.ds.digest" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_apl_address_family , {
"Address Family" , "dns.apl.address_family" , FT_UINT16 , BASE_DEC , VALS ( afamily_vals ) , 0 , NULL , HFILL }
}
, {
& hf_dns_apl_coded_prefix , {
"Prefix Length" , "dns.apl.coded_prefix" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_apl_negation , {
"Negation Flag" , "dns.apl.negation" , FT_BOOLEAN , 8 , TFS ( & tfs_dns_apl_negation ) , DNS_APL_NEGATION , NULL , HFILL }
}
, {
& hf_dns_apl_afdlength , {
"Address Length" , "dns.apl.afdlength" , FT_UINT8 , BASE_DEC , NULL , DNS_APL_AFDLENGTH , "in octets" , HFILL }
}
, {
& hf_dns_apl_afdpart_ipv4 , {
"Address" , "dns.apl.afdpart.ipv4" , FT_IPv4 , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_apl_afdpart_ipv6 , {
"Address" , "dns.apl.afdpart.ipv6" , FT_IPv6 , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_apl_afdpart_data , {
"Address" , "dns.apl.afdpart.data" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_gpos_longitude_length , {
"Longitude length" , "dns.gpos.longitude_length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_gpos_longitude , {
"Longitude" , "dns.gpos.longitude" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_gpos_latitude_length , {
"Latitude length" , "dns.gpos.latitude_length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_gpos_latitude , {
"Latitude" , "dns.gpos.latitude" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_gpos_altitude_length , {
"Altitude length" , "dns.gpos.altitude_length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_gpos_altitude , {
"Altitude" , "dns.gpos.altitude" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_rp_mailbox , {
"Mailbox" , "dns.rp.mailbox" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_rp_txt_rr , {
"TXT RR" , "dns.rp.txt_rr" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_afsdb_subtype , {
"Subtype" , "dns.afsdb.subtype" , FT_UINT16 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_afsdb_hostname , {
"Hostname" , "dns.afsdb.hostname" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_x25_length , {
"Length" , "dns.x25.length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_x25_psdn_address , {
"PSDN-Address" , "dns.x25.psdn_address" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_isdn_length , {
"Length" , "dns.idsn.length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_isdn_address , {
"ISDN Address" , "dns.idsn.address" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_isdn_sa_length , {
"Length" , "dns.idsn.sa.length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_isdn_sa , {
"Sub Address" , "dns.idsn.sa.address" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_rt_preference , {
"Preference" , "dns.rt.subtype" , FT_UINT16 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_rt_intermediate_host , {
"Intermediate Hostname" , "dns.rt.intermediate_host" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_nsap_rdata , {
"NSAP Data" , "dns.nsap.rdata" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_nsap_ptr_owner , {
"Owner" , "dns.nsap_ptr.owner" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_caa_flags , {
"CAA Flags" , "dns.caa.flags" , FT_UINT8 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_caa_flag_issuer_critical , {
"Issuer Critical" , "dns.caa.flags.issuer_critical" , FT_BOOLEAN , 8 , TFS ( & tfs_critical_not_critical ) , CAA_FLAG_ISSUER_CRITICAL , "Other CAs must not issue certificates" , HFILL }
}
, {
& hf_dns_caa_issue , {
"Issue" , "dns.caa.issue" , FT_STRING , BASE_NONE , NULL , 0x0 , "CA which is allowed to issue certificates" , HFILL }
}
, {
& hf_dns_caa_issuewild , {
"Issue Wildcard" , "dns.caa.issuewild" , FT_STRING , BASE_NONE , NULL , 0x0 , "CA which is allowed to issue wildcard certificates" , HFILL }
}
, {
& hf_dns_caa_iodef , {
"Report URL" , "dns.caa.iodef" , FT_STRING , BASE_NONE , NULL , 0x0 , "URL or email address for certificate issue requests and violation reports" , HFILL }
}
, {
& hf_dns_caa_unknown , {
"Unkown tag" , "dns.caa.unknown" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_caa_tag_length , {
"Tag length" , "dns.caa.tag_length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_caa_tag , {
"Tag" , "dns.caa.tag" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_caa_value , {
"Value" , "dns.caa.value" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_wins_local_flag , {
"Local Flag" , "dns.wins.local_flag" , FT_BOOLEAN , 32 , TFS ( & tfs_true_false ) , 0x1 , NULL , HFILL }
}
, {
& hf_dns_wins_lookup_timeout , {
"Lookup timeout" , "dns.wins.lookup_timeout" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "In seconds" , HFILL }
}
, {
& hf_dns_wins_cache_timeout , {
"Cache timeout" , "dns.wins.cache_timeout" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "In seconds" , HFILL }
}
, {
& hf_dns_wins_nb_wins_servers , {
"Number of WINS servers" , "dns.wins.nb_wins_servers" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_wins_server , {
"WINS Server Address" , "dns.wins.wins_server" , FT_IPv4 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_winsr_local_flag , {
"Local Flag" , "dns.winsr.local_flag" , FT_BOOLEAN , 32 , TFS ( & tfs_true_false ) , 0x1 , NULL , HFILL }
}
, {
& hf_dns_winsr_lookup_timeout , {
"Lookup timeout" , "dns.winsr.lookup_timeout" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "In seconds" , HFILL }
}
, {
& hf_dns_winsr_cache_timeout , {
"Cache timeout" , "dns.winsr.cache_timeout" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "In seconds" , HFILL }
}
, {
& hf_dns_winsr_name_result_domain , {
"Name Result Domain" , "dns.winsr.name_result_domain" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_data , {
"Data" , "dns.data" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, }
;
static ei_register_info ei [ ] = {
{
& ei_dns_opt_bad_length , {
"dns.rr.opt.bad_length" , PI_MALFORMED , PI_ERROR , "Length too long for any type of IP address." , EXPFILL }
}
, {
& ei_dns_undecoded_option , {
"dns.undecoded.type" , PI_UNDECODED , PI_NOTE , "Undecoded option" , EXPFILL }
}
, {
& ei_dns_depr_opc , {
"dns.depr.opc" , PI_PROTOCOL , PI_WARN , "Deprecated opcode" , EXPFILL }
}
, {
& ei_ttl_negative , {
"dns.ttl.negative" , PI_PROTOCOL , PI_WARN , "TTL can't be negative" , EXPFILL }
}
, {
& ei_dns_tsig_alg , {
"dns.tsig.noalg" , PI_UNDECODED , PI_WARN , "No dissector for algorithm" , EXPFILL }
}
, }
;
static gint * ett [ ] = {
& ett_dns , & ett_dns_qd , & ett_dns_rr , & ett_dns_qry , & ett_dns_ans , & ett_dns_flags , & ett_dns_opts , & ett_nsec3_flags , & ett_key_flags , & ett_t_key , & ett_dns_mac , & ett_caa_flags , & ett_caa_data , }
;
module_t * dns_module ;
expert_module_t * expert_dns ;
proto_dns = proto_register_protocol ( "Domain Name Service" , "DNS" , "dns" ) ;
proto_register_field_array ( proto_dns , hf , array_length ( hf ) ) ;
proto_register_subtree_array ( ett , array_length ( ett ) ) ;
expert_dns = expert_register_protocol ( proto_dns ) ;
expert_register_field_array ( expert_dns , ei , array_length ( ei ) ) ;
range_convert_str ( & global_dns_tcp_port_range , DEFAULT_DNS_PORT_RANGE , MAX_TCP_PORT ) ;
range_convert_str ( & global_dns_udp_port_range , DEFAULT_DNS_PORT_RANGE , MAX_UDP_PORT ) ;
dns_module = prefs_register_protocol ( proto_dns , proto_reg_handoff_dns ) ;
prefs_register_range_preference ( dns_module , "tcp.ports" , "DNS TCP ports" , "TCP ports to be decoded as DNS (default: " DEFAULT_DNS_PORT_RANGE ")" , & global_dns_tcp_port_range , MAX_TCP_PORT ) ;
prefs_register_range_preference ( dns_module , "udp.ports" , "DNS UDP ports" , "UDP ports to be decoded as DNS (default: " DEFAULT_DNS_PORT_RANGE ")" , & global_dns_udp_port_range , MAX_UDP_PORT ) ;
prefs_register_bool_preference ( dns_module , "desegment_dns_messages" , "Reassemble DNS messages spanning multiple TCP segments" , "Whether the DNS dissector should reassemble messages spanning multiple TCP segments." " To use this option, you must also enable \"Allow subdissectors to reassemble TCP streams\" in the TCP protocol settings." , & dns_desegment ) ;
prefs_register_bool_preference ( dns_module , "use_for_addr_resolution" , "Use DNS packet data for address resolution" , "Whether addressame pairs found in dissected DNS packets should be used by Wireshark for name resolution." , & dns_use_for_addr_resolution ) ;
dns_tsig_dissector_table = register_dissector_table ( "dns.tsig.mac" , "DNS TSIG MAC Dissectors" , FT_STRING , BASE_NONE ) ;
dns_tap = register_tap ( "dns" ) ;
} | void proto_register_dns ( void ) {
static hf_register_info hf [ ] = {
{
& hf_dns_length , {
"Length" , "dns.length" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Length of DNS-over-TCP request or response" , HFILL }
}
, {
& hf_dns_flags , {
"Flags" , "dns.flags" , FT_UINT16 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_flags_response , {
"Response" , "dns.flags.response" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_response ) , F_RESPONSE , "Is the message a response?" , HFILL }
}
, {
& hf_dns_flags_opcode , {
"Opcode" , "dns.flags.opcode" , FT_UINT16 , BASE_DEC , VALS ( opcode_vals ) , F_OPCODE , "Operation code" , HFILL }
}
, {
& hf_dns_flags_authoritative , {
"Authoritative" , "dns.flags.authoritative" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_authoritative ) , F_AUTHORITATIVE , "Is the server is an authority for the domain?" , HFILL }
}
, {
& hf_dns_flags_conflict_query , {
"Conflict" , "dns.flags.conflict" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_conflict_query ) , F_CONFLICT , "Did we receive multiple responses to a query?" , HFILL }
}
, {
& hf_dns_flags_conflict_response , {
"Conflict" , "dns.flags.conflict" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_conflict_response ) , F_CONFLICT , "Is the name considered unique?" , HFILL }
}
, {
& hf_dns_flags_truncated , {
"Truncated" , "dns.flags.truncated" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_truncated ) , F_TRUNCATED , "Is the message truncated?" , HFILL }
}
, {
& hf_dns_flags_recdesired , {
"Recursion desired" , "dns.flags.recdesired" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_recdesired ) , F_RECDESIRED , "Do query recursively?" , HFILL }
}
, {
& hf_dns_flags_tentative , {
"Tentative" , "dns.flags.tentative" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_tentative ) , F_TENTATIVE , "Is the responder authoritative for the name, but not yet verified the uniqueness?" , HFILL }
}
, {
& hf_dns_flags_recavail , {
"Recursion available" , "dns.flags.recavail" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_recavail ) , F_RECAVAIL , "Can the server do recursive queries?" , HFILL }
}
, {
& hf_dns_flags_z , {
"Z" , "dns.flags.z" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_z ) , F_Z , "Z flag" , HFILL }
}
, {
& hf_dns_flags_authenticated , {
"Answer authenticated" , "dns.flags.authenticated" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_authenticated ) , F_AUTHENTIC , "Was the reply data authenticated by the server?" , HFILL }
}
, {
& hf_dns_flags_ad , {
"AD bit" , "dns.flags.authenticated" , FT_BOOLEAN , 16 , TFS ( & tfs_set_notset ) , F_AUTHENTIC , NULL , HFILL }
}
, {
& hf_dns_flags_checkdisable , {
"Non-authenticated data" , "dns.flags.checkdisable" , FT_BOOLEAN , 16 , TFS ( & tfs_flags_checkdisable ) , F_CHECKDISABLE , "Is non-authenticated data acceptable?" , HFILL }
}
, {
& hf_dns_flags_rcode , {
"Reply code" , "dns.flags.rcode" , FT_UINT16 , BASE_DEC , VALS ( rcode_vals ) , F_RCODE , NULL , HFILL }
}
, {
& hf_dns_transaction_id , {
"Transaction ID" , "dns.id" , FT_UINT16 , BASE_HEX , NULL , 0x0 , "Identification of transaction" , HFILL }
}
, {
& hf_dns_qry_type , {
"Type" , "dns.qry.type" , FT_UINT16 , BASE_DEC | BASE_EXT_STRING , & dns_types_description_vals_ext , 0 , "Query Type" , HFILL }
}
, {
& hf_dns_qry_class , {
"Class" , "dns.qry.class" , FT_UINT16 , BASE_HEX , VALS ( dns_classes ) , 0x0 , "Query Class" , HFILL }
}
, {
& hf_dns_qry_class_mdns , {
"Class" , "dns.qry.class" , FT_UINT16 , BASE_HEX , VALS ( dns_classes ) , 0x7FFF , "Query Class" , HFILL }
}
, {
& hf_dns_qry_qu , {
"\"QU\" question" , "dns.qry.qu" , FT_BOOLEAN , 16 , NULL , C_QU , "QU flag" , HFILL }
}
, {
& hf_dns_qry_name , {
"Name" , "dns.qry.name" , FT_STRING , BASE_NONE , NULL , 0x0 , "Query Name" , HFILL }
}
, {
& hf_dns_qry_name_len , {
"Name Length" , "dns.qry.name.len" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Query Name Len" , HFILL }
}
, {
& hf_dns_count_labels , {
"Label Count" , "dns.count.labels" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Query Label Count" , HFILL }
}
, {
& hf_dns_rr_type , {
"Type" , "dns.resp.type" , FT_UINT16 , BASE_DEC | BASE_EXT_STRING , & dns_types_description_vals_ext , 0x0 , "Response Type" , HFILL }
}
, {
& hf_dns_rr_class , {
"Class" , "dns.resp.class" , FT_UINT16 , BASE_HEX , VALS ( dns_classes ) , 0x0 , "Response Class" , HFILL }
}
, {
& hf_dns_rr_class_mdns , {
"Class" , "dns.resp.class" , FT_UINT16 , BASE_HEX , VALS ( dns_classes ) , 0x7FFF , "Response Class" , HFILL }
}
, {
& hf_dns_rr_cache_flush , {
"Cache flush" , "dns.resp.cache_flush" , FT_BOOLEAN , 16 , NULL , C_FLUSH , "Cache flush flag" , HFILL }
}
, {
& hf_dns_rr_ext_rcode , {
"Higher bits in extended RCODE" , "dns.resp.ext_rcode" , FT_UINT8 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_rr_edns0_version , {
"EDNS0 version" , "dns.resp.edns0_version" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_rr_z , {
"Z" , "dns.resp.z" , FT_UINT16 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_rr_z_do , {
"DO bit" , "dns.resp.z.do" , FT_BOOLEAN , 16 , TFS ( & tfs_dns_rr_z_do ) , 0x8000 , "DNSSEC OK" , HFILL }
}
, {
& hf_dns_rr_z_reserved , {
"Reserved" , "dns.resp.z.reserved" , FT_UINT16 , BASE_HEX , NULL , 0x7FFF , NULL , HFILL }
}
, {
& hf_dns_srv_service , {
"Service" , "dns.srv.service" , FT_STRING , BASE_NONE , NULL , 0x0 , "Desired service" , HFILL }
}
, {
& hf_dns_srv_proto , {
"Protocol" , "dns.srv.proto" , FT_STRING , BASE_NONE , NULL , 0x0 , "Desired protocol" , HFILL }
}
, {
& hf_dns_srv_name , {
"Name" , "dns.srv.name" , FT_STRING , BASE_NONE , NULL , 0x0 , "Domain this resource record refers to" , HFILL }
}
, {
& hf_dns_srv_priority , {
"Priority" , "dns.srv.priority" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_srv_weight , {
"Weight" , "dns.srv.weight" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_srv_port , {
"Port" , "dns.srv.port" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_srv_target , {
"Target" , "dns.srv.target" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_order , {
"Order" , "dns.naptr.order" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_preference , {
"Preference" , "dns.naptr.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_flags_length , {
"Flags Length" , "dns.naptr.flags_length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_flags , {
"Flags" , "dns.naptr.flags" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_service_length , {
"Service Length" , "dns.naptr.service_length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_service , {
"Service" , "dns.naptr.service" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_regex_length , {
"Regex Length" , "dns.naptr.regex_length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_regex , {
"Regex" , "dns.naptr.regex" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_replacement_length , {
"Replacement Length" , "dns.naptr.replacement_length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_naptr_replacement , {
"Replacement" , "dns.naptr.replacement" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_rr_name , {
"Name" , "dns.resp.name" , FT_STRING , BASE_NONE , NULL , 0x0 , "Response Name" , HFILL }
}
, {
& hf_dns_rr_ttl , {
"Time to live" , "dns.resp.ttl" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "Response TTL" , HFILL }
}
, {
& hf_dns_rr_len , {
"Data length" , "dns.resp.len" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "Response Length" , HFILL }
}
, {
& hf_dns_a , {
"Address" , "dns.a" , FT_IPv4 , BASE_NONE , NULL , 0x0 , "Response Address" , HFILL }
}
, {
& hf_dns_md , {
"Mail Destination" , "dns.md" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_mf , {
"Mail Forwarder" , "dns.mf" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_mb , {
"MailBox Domaine" , "dns.mb" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_mg , {
"Mail Group member" , "dns.mg" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_mr , {
"Mail Rename domaine" , "dns.mr" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_null , {
"Null (data)" , "dns.null" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_aaaa , {
"AAAA Address" , "dns.aaaa" , FT_IPv6 , BASE_NONE , NULL , 0x0 , "AAAA Response Address" , HFILL }
}
, {
& hf_dns_cname , {
"CNAME" , "dns.cname" , FT_STRING , BASE_NONE , NULL , 0x0 , "Response Primary Name" , HFILL }
}
, {
& hf_dns_rr_udp_payload_size_mdns , {
"UDP payload size" , "dns.rr.udp_payload_size" , FT_UINT16 , BASE_HEX , NULL , 0x7FFF , NULL , HFILL }
}
, {
& hf_dns_rr_udp_payload_size , {
"UDP payload size" , "dns.rr.udp_payload_size" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_mname , {
"Primary name server" , "dns.soa.mname" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_rname , {
"Responsible authority's mailbox" , "dns.soa.rname" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_serial_number , {
"Serial Number" , "dns.soa.serial_number" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_refresh_interval , {
"Refresh Interval" , "dns.soa.refresh_interval" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_retry_interval , {
"Retry Interval" , "dns.soa.retry_interval" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_expire_limit , {
"Expire limit" , "dns.soa.expire_limit" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_soa_minimum_ttl , {
"Minimum TTL" , "dns.soa.mininum_ttl" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ptr_domain_name , {
"Domain Name" , "dns.ptr.domain_name" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_wks_address , {
"Address" , "dns.wks.address" , FT_IPv4 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_wks_protocol , {
"Protocol" , "dns.wks.protocol" , FT_UINT8 , BASE_DEC | BASE_EXT_STRING , & ipproto_val_ext , 0x0 , NULL , HFILL }
}
, {
& hf_dns_wks_bits , {
"Bits" , "dns.wks.bits" , FT_UINT8 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_hinfo_cpu_length , {
"CPU Length" , "dns.hinfo.cpu_length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_hinfo_cpu , {
"CPU" , "dns.hinfo.cpu" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_hinfo_os_length , {
"OS Length" , "dns.hinfo.os_length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_hinfo_os , {
"OS" , "dns.hinfo.os" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_minfo_r_mailbox , {
"Responsible Mailbox" , "dns.minfo.r" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_minfo_e_mailbox , {
"Error Mailbox" , "dns.minfo.e" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_mx_preference , {
"Preference" , "dns.mx.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_mx_mail_exchange , {
"Mail Exchange" , "dns.mx.mail_exchange" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_txt_length , {
"TXT Length" , "dns.txt.length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_txt , {
"TXT" , "dns.txt" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_openpgpkey , {
"OpenPGP Key" , "dns.openpgpkey" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_spf_length , {
"SPF Length" , "dns.spf.length" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_spf , {
"SPF" , "dns.spf" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_nodeid_preference , {
"Preference" , "dns.ilnp.nid.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_nodeid , {
"NodeID" , "dns.ilnp.nid" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_locator32_preference , {
"Preference" , "dns.ilnp.l32.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_locator32 , {
"Locator32" , "dns.ilnp.l32" , FT_IPv4 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_locator64_preference , {
"Preference" , "dns.ilnp.l64.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_locator64 , {
"Locator64" , "dns.ilnp.l64" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_locatorfqdn_preference , {
"Preference" , "dns.ilnp.lp.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ilnp_locatorfqdn , {
"Locator FQDN" , "dns.ilnp.lp" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_eui48 , {
"EUI48 Address" , "dns.eui48" , FT_ETHER , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_eui64 , {
"EUI64 Address" , "dns.eui64" , FT_EUI64 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_rrsig_type_covered , {
"Type Covered" , "dns.rrsig.type_covered" , FT_UINT16 , BASE_DEC | BASE_EXT_STRING , & dns_types_description_vals_ext , 0x0 , "Identifies the type of the RRset that is covered by this RRSIG record" , HFILL }
}
, {
& hf_dns_rrsig_algorithm , {
"Algorithm" , "dns.rrsig.algorithm" , FT_UINT8 , BASE_DEC , VALS ( dnssec_algo_vals ) , 0x0 , "Identifies the cryptographic algorithm used to create the signature" , HFILL }
}
, {
& hf_dns_rrsig_labels , {
"Labels" , "dns.rrsig.labels" , FT_UINT8 , BASE_DEC , NULL , 0x0 , "Specifies the number of labels in the original RRSIG RR owner name" , HFILL }
}
, {
& hf_dns_rrsig_original_ttl , {
"Original TTL" , "dns.rrsig.original_ttl" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "Specifies the TTL of the covered RRset as it appears in the authoritative zone" , HFILL }
}
, {
& hf_dns_rrsig_signature_expiration , {
"Signature Expiration" , "dns.rrsig.signature_expiration" , FT_ABSOLUTE_TIME , ABSOLUTE_TIME_LOCAL , NULL , 0x0 , "Specify a validity period for the signature" , HFILL }
}
, {
& hf_dns_rrsig_signature_inception , {
"Signature Inception" , "dns.rrsig.signature_inception" , FT_ABSOLUTE_TIME , ABSOLUTE_TIME_LOCAL , NULL , 0x0 , "Specify a validity period for the signature" , HFILL }
}
, {
& hf_dns_rrsig_key_tag , {
"Key Tag" , "dns.rrsig.key_tag" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Contains the key tag value of the DNSKEY RR that validates this signature" , HFILL }
}
, {
& hf_dns_rrsig_signers_name , {
"Signer's name" , "dns.rrsig.signers_name" , FT_STRING , BASE_NONE , NULL , 0x0 , "Identifies the owner name of the DNSKEY RR that a validator is supposed to use to validate this signature" , HFILL }
}
, {
& hf_dns_rrsig_signature , {
"Signature" , "dns.rrsig.signature" , FT_BYTES , BASE_NONE , NULL , 0x0 , "Contains the cryptographic signature that covers the RRSIG RDATA" , HFILL }
}
, {
& hf_dns_dnskey_flags , {
"Flags" , "dns.dnskey.flags" , FT_UINT16 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_dnskey_flags_zone_key , {
"Zone Key" , "dns.dnskey.flags.zone_key" , FT_BOOLEAN , 16 , TFS ( & dns_dnskey_zone_key_tfs ) , DNSKEY_FLAGS_ZK , NULL , HFILL }
}
, {
& hf_dns_dnskey_flags_key_revoked , {
"Key Revoked" , "dns.dnskey.flags.key_revoked" , FT_BOOLEAN , 16 , TFS ( & tfs_yes_no ) , DNSKEY_FLAGS_KR , NULL , HFILL }
}
, {
& hf_dns_dnskey_flags_secure_entry_point , {
"Key Signing Key" , "dns.dnskey.flags.secure_entry_point" , FT_BOOLEAN , 16 , TFS ( & tfs_yes_no ) , DNSKEY_FLAGS_SEP , NULL , HFILL }
}
, {
& hf_dns_dnskey_flags_reserved , {
"Key Signing Key" , "dns.dnskey.flags.reserved" , FT_UINT16 , BASE_HEX , NULL , DNSKEY_FLAGS_RSV , "Must be zero" , HFILL }
}
, {
& hf_dns_dnskey_protocol , {
"Protocol" , "dns.dnskey.protocol" , FT_UINT8 , BASE_DEC , NULL , 0x0 , "Must be 3" , HFILL }
}
, {
& hf_dns_dnskey_algorithm , {
"Algorithm" , "dns.dnskey.algorithm" , FT_UINT8 , BASE_DEC , VALS ( dnssec_algo_vals ) , 0x0 , "Identifies the public key's cryptographic algorithm and determines the format of the Public Key field" , HFILL }
}
, {
& hf_dns_dnskey_key_id , {
"Key id" , "dns.dnskey.key_id" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_dnskey_public_key , {
"Public Key" , "dns.dnskey.public_key" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_key_flags , {
"Flags" , "dns.key.flags" , FT_UINT16 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_key_flags_authentication , {
"Key allowed for authentication" , "dns.key.flags.authentication" , FT_BOOLEAN , 16 , TFS ( & tfs_not_allowed_allowed ) , 0x8000 , NULL , HFILL }
}
, {
& hf_dns_key_flags_confidentiality , {
"Key allowed for confidentiality" , "dns.key.flags.confidentiality" , FT_BOOLEAN , 16 , TFS ( & tfs_not_allowed_allowed ) , 0x4000 , NULL , HFILL }
}
, {
& hf_dns_key_flags_key_required , {
"Key required" , "dns.key.flags.required" , FT_BOOLEAN , 16 , TFS ( & tfs_required_experimental ) , 0x2000 , NULL , HFILL }
}
, {
& hf_dns_key_flags_associated_user , {
"Key is associated with a user" , "dns.key.flags.associated_user" , FT_BOOLEAN , 16 , TFS ( & tfs_yes_no ) , 0x0400 , NULL , HFILL }
}
, {
& hf_dns_key_flags_associated_named_entity , {
"Key is associated with the named entity" , "dns.key.flags.associated_named_entity" , FT_BOOLEAN , 16 , TFS ( & tfs_yes_no ) , 0x0200 , NULL , HFILL }
}
, {
& hf_dns_key_flags_ipsec , {
"Key use with IPSEC" , "dns.key.flags.ipsec" , FT_BOOLEAN , 16 , TFS ( & tfs_valid_invalid ) , 0x0080 , NULL , HFILL }
}
, {
& hf_dns_key_flags_mime , {
"Key use with MIME security multiparts" , "dns.key.flags.mime" , FT_BOOLEAN , 16 , TFS ( & tfs_valid_invalid ) , 0x0040 , NULL , HFILL }
}
, {
& hf_dns_key_flags_signatory , {
"Signatory" , "dns.key.flags.signatory" , FT_UINT16 , BASE_DEC , NULL , 0x000F , NULL , HFILL }
}
, {
& hf_dns_key_protocol , {
"Protocol" , "dns.key.protocol" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_key_algorithm , {
"Algorithm" , "dns.key.algorithm" , FT_UINT8 , BASE_DEC , VALS ( dnssec_algo_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_key_key_id , {
"Key ID" , "dns.key.key_id" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_key_public_key , {
"Public Key" , "dns.key.public_key" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_px_preference , {
"Preference" , "dns.px.preference" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_px_map822 , {
"MAP822" , "dns.px.map822" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_px_mapx400 , {
"MAPX400" , "dns.px.map400" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_algo_name , {
"Algorithm name" , "dns.tkey.algo_name" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_signature_expiration , {
"Signature Expiration" , "dns.tkey.signature_expiration" , FT_ABSOLUTE_TIME , ABSOLUTE_TIME_LOCAL , NULL , 0x0 , "Specify a validity period for the signature" , HFILL }
}
, {
& hf_dns_tkey_signature_inception , {
"Signature Inception" , "dns.tkey.signature_inception" , FT_ABSOLUTE_TIME , ABSOLUTE_TIME_LOCAL , NULL , 0x0 , "Specify a validity period for the signature" , HFILL }
}
, {
& hf_dns_tkey_mode , {
"Mode" , "dns.tkey.mode" , FT_UINT16 , BASE_DEC , VALS ( tkey_mode_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_error , {
"Error" , "dns.tkey.error" , FT_UINT16 , BASE_DEC , VALS ( rcode_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_key_size , {
"Key Size" , "dns.tkey.key_size" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_key_data , {
"Key Data" , "dns.tkey.key_data" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_other_size , {
"Other Size" , "dns.tkey.other_size" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tkey_other_data , {
"Other Data" , "dns.tkey.other_data" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_gateway_precedence , {
"Gateway Precedence" , "dns.ipseckey.gateway_precedence" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_gateway_algorithm , {
"Gateway Algorithm" , "dns.ipseckey.gateway_algorithm" , FT_UINT8 , BASE_DEC , VALS ( gw_algo_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_gateway_type , {
"Gateway Type" , "dns.ipseckey.gateway_type" , FT_UINT8 , BASE_DEC , VALS ( gw_type_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_gateway_ipv4 , {
"IPv4 Gateway" , "dns.ipseckey.gateway_ipv4" , FT_IPv4 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_gateway_ipv6 , {
"IPv6 Gateway" , "dns.ipseckey.gateway_ipv6" , FT_IPv6 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_gateway_dns , {
"DNS Gateway" , "dns.ipseckey.gateway_dns" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ipseckey_public_key , {
"Public Key" , "dns.ipseckey.public_key" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_a6_prefix_len , {
"Prefix len" , "dns.a6.prefix_len" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_a6_address_suffix , {
"Address Suffix" , "dns.a6.address_suffix" , FT_IPv6 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_a6_prefix_name , {
"Prefix name" , "dns.a6.prefix_name" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_dname , {
"Dname" , "dns.dname" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_version , {
"Version" , "dns.loc.version" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_size , {
"Size" , "dns.loc.size" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_horizontal_precision , {
"Horizontal Precision" , "dns.loc.horizontal_precision" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_vertical_precision , {
"Vertial Precision" , "dns.loc.vertial_precision" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_latitude , {
"Latitude" , "dns.loc.latitude" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_longitude , {
"Longitude" , "dns.loc.longitude" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_altitude , {
"Altitude" , "dns.loc.altitude" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_loc_unknown_data , {
"Unknown data" , "dns.loc.unknown_data" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_nxt_next_domain_name , {
"Next Domain Name" , "dns.nxt.next_domain_name" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_kx_preference , {
"Preference" , "dns.kx.preference" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_kx_key_exchange , {
"Key Exchange" , "dns.kx.key_exchange" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_cert_type , {
"Type" , "dns.cert.type" , FT_UINT16 , BASE_DEC , VALS ( dns_cert_type_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_cert_key_tag , {
"Key Tag" , "dns.cert.key_tag" , FT_UINT16 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_cert_algorithm , {
"Algorithm" , "dns.cert.algorithm" , FT_UINT8 , BASE_DEC , VALS ( dnssec_algo_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_cert_certificate , {
"Certificate (or CRL)" , "dns.cert.certificate" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_nsec_next_domain_name , {
"Next Domain Name" , "dns.nsec.next_domain_name" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_ns , {
"Name Server" , "dns.ns" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt , {
"Option" , "dns.opt" , FT_NONE , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_code , {
"Option Code" , "dns.opt.code" , FT_UINT16 , BASE_DEC , VALS ( edns0_opt_code_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_len , {
"Option Length" , "dns.opt.len" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_data , {
"Option Data" , "dns.opt.data" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_dau , {
"DAU" , "dns.opt.dau" , FT_UINT8 , BASE_DEC , VALS ( dnssec_algo_vals ) , 0x0 , "DNSSEC Algorithm Understood" , HFILL }
}
, {
& hf_dns_opt_dhu , {
"DHU" , "dns.opt.dhu" , FT_UINT8 , BASE_DEC , VALS ( dns_ds_digest_vals ) , 0x0 , "DS Hash Understood" , HFILL }
}
, {
& hf_dns_opt_n3u , {
"N3U" , "dns.opt.n3u" , FT_UINT8 , BASE_DEC , VALS ( hash_algorithms ) , 0x0 , "NSEC3 Hash Understood" , HFILL }
}
, {
& hf_dns_opt_client_family , {
"Family" , "dns.opt.client.family" , FT_UINT16 , BASE_DEC , VALS ( afamily_vals ) , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_client_netmask , {
"Source Netmask" , "dns.opt.client.netmask" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_client_scope , {
"Scope Netmask" , "dns.opt.client.scope" , FT_UINT8 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_client_addr , {
"Client Subnet" , "dns.opt.client.addr" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_client_addr4 , {
"Client Subnet" , "dns.opt.client.addr4" , FT_IPv4 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_opt_client_addr6 , {
"Client Subnet" , "dns.opt.client.addr6" , FT_IPv6 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_count_questions , {
"Questions" , "dns.count.queries" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of queries in packet" , HFILL }
}
, {
& hf_dns_count_zones , {
"Zones" , "dns.count.zones" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of zones in packet" , HFILL }
}
, {
& hf_dns_count_answers , {
"Answer RRs" , "dns.count.answers" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of answers in packet" , HFILL }
}
, {
& hf_dns_count_prerequisites , {
"Prerequisites" , "dns.count.prerequisites" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of prerequisites in packet" , HFILL }
}
, {
& hf_dns_count_auth_rr , {
"Authority RRs" , "dns.count.auth_rr" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of authoritative records in packet" , HFILL }
}
, {
& hf_dns_count_updates , {
"Updates" , "dns.count.updates" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of updates records in packet" , HFILL }
}
, {
& hf_dns_nsec3_algo , {
"Hash algorithm" , "dns.nsec3.algo" , FT_UINT8 , BASE_DEC , VALS ( hash_algorithms ) , 0 , NULL , HFILL }
}
, {
& hf_dns_nsec3_flags , {
"NSEC3 flags" , "dns.nsec3.flags" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_nsec3_flag_optout , {
"NSEC3 Opt-out flag" , "dns.nsec3.flags.opt_out" , FT_BOOLEAN , 8 , TFS ( & tfs_flags_nsec3_optout ) , NSEC3_FLAG_OPTOUT , NULL , HFILL }
}
, {
& hf_dns_nsec3_iterations , {
"NSEC3 iterations" , "dns.nsec3.iterations" , FT_UINT16 , BASE_DEC , NULL , 0 , "Number of hashing iterations" , HFILL }
}
, {
& hf_dns_nsec3_salt_length , {
"Salt length" , "dns.nsec3.salt_length" , FT_UINT8 , BASE_DEC , NULL , 0 , "Length of salt in bytes" , HFILL }
}
, {
& hf_dns_nsec3_salt_value , {
"Salt value" , "dns.nsec3.salt_value" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_nsec3_hash_length , {
"Hash length" , "dns.nsec3.hash_length" , FT_UINT8 , BASE_DEC , NULL , 0 , "Length in bytes of next hashed owner" , HFILL }
}
, {
& hf_dns_nsec3_hash_value , {
"Next hashed owner" , "dns.nsec3.hash_value" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_tlsa_certificate_usage , {
"Certificate Usage" , "dns.tlsa.certificate_usage" , FT_UINT8 , BASE_DEC , VALS ( tlsa_certificate_usage_vals ) , 0 , "Specifies the provided association that will be used to match the certificate presented in the TLS handshake" , HFILL }
}
, {
& hf_dns_tlsa_selector , {
"Selector" , "dns.tlsa.selector" , FT_UINT8 , BASE_DEC , VALS ( tlsa_selector_vals ) , 0 , "Specifies which part of the TLS certificate presented by the server will be matched against the association data" , HFILL }
}
, {
& hf_dns_tlsa_matching_type , {
"Matching Type" , "dns.tlsa.matching_type" , FT_UINT8 , BASE_DEC , VALS ( tlsa_matching_type_vals ) , 0 , "Specifies how the certificate association is presented" , HFILL }
}
, {
& hf_dns_tlsa_certificate_association_data , {
"Certificate Association Data" , "dns.tlsa.certificate_association_data" , FT_BYTES , BASE_NONE , NULL , 0 , "The data refers to the certificate in the association" , HFILL }
}
, {
& hf_dns_tsig_algorithm_name , {
"Algorithm Name" , "dns.tsig.algorithm_name" , FT_STRING , BASE_NONE , NULL , 0x0 , "Name of algorithm used for the MAC" , HFILL }
}
, {
& hf_dns_tsig_time_signed , {
"Time Signed" , "dns.tsig.time_signed" , FT_ABSOLUTE_TIME , ABSOLUTE_TIME_LOCAL , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tsig_original_id , {
"Original Id" , "dns.tsig.original_id" , FT_UINT16 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tsig_error , {
"Error" , "dns.tsig.error" , FT_UINT16 , BASE_DEC , VALS ( rcode_vals ) , 0x0 , "Expanded RCODE for TSIG" , HFILL }
}
, {
& hf_dns_tsig_fudge , {
"Fudge" , "dns.tsig.fudge" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of bytes for the MAC" , HFILL }
}
, {
& hf_dns_tsig_mac_size , {
"MAC Size" , "dns.tsig.mac_size" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of bytes for the MAC" , HFILL }
}
, {
& hf_dns_tsig_other_len , {
"Other Len" , "dns.tsig.other_len" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of bytes for Other Data" , HFILL }
}
, {
& hf_dns_tsig_mac , {
"MAC" , "dns.tsig.mac" , FT_NONE , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_tsig_other_data , {
"Other Data" , "dns.tsig.other_data" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_response_in , {
"Response In" , "dns.response_in" , FT_FRAMENUM , BASE_NONE , NULL , 0x0 , "The response to this DNS query is in this frame" , HFILL }
}
, {
& hf_dns_response_to , {
"Request In" , "dns.response_to" , FT_FRAMENUM , BASE_NONE , NULL , 0x0 , "This is a response to the DNS query in this frame" , HFILL }
}
, {
& hf_dns_time , {
"Time" , "dns.time" , FT_RELATIVE_TIME , BASE_NONE , NULL , 0x0 , "The time between the Query and the Response" , HFILL }
}
, {
& hf_dns_count_add_rr , {
"Additional RRs" , "dns.count.add_rr" , FT_UINT16 , BASE_DEC , NULL , 0x0 , "Number of additional records in packet" , HFILL }
}
, {
& hf_dns_sshfp_algorithm , {
"Algorithm" , "dns.sshfp.algorithm" , FT_UINT8 , BASE_DEC , VALS ( sshfp_algo_vals ) , 0 , NULL , HFILL }
}
, {
& hf_dns_sshfp_fingerprint_type , {
"Fingerprint type" , "dns.sshfp.fingerprint.type" , FT_UINT8 , BASE_DEC , VALS ( sshfp_fingertype_vals ) , 0 , NULL , HFILL }
}
, {
& hf_dns_sshfp_fingerprint , {
"Fingerprint" , "dns.sshfp.fingerprint" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_hip_hit_length , {
"HIT length" , "dns.hip.hit.length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_hip_pk_algo , {
"HIT length" , "dns.hip.hit.pk.algo" , FT_UINT8 , BASE_DEC , VALS ( hip_algo_vals ) , 0 , NULL , HFILL }
}
, {
& hf_dns_hip_pk_length , {
"PK length" , "dns.hip.pk.length" , FT_UINT16 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_hip_hit , {
"Host Identity Tag" , "dns.hip.hit" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_hip_pk , {
"HIP Public Key" , "dns.hip.pk" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_hip_rendezvous_server , {
"Rendezvous Server" , "dns.hip.rendezvous_server" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_dhcid_rdata , {
"DHCID Data" , "dns.dhcid.rdata" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_ds_key_id , {
"Key id" , "dns.ds.key_id" , FT_UINT16 , BASE_HEX , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_ds_algorithm , {
"Algorithm" , "dns.ds.algorithm" , FT_UINT8 , BASE_DEC , VALS ( dnssec_algo_vals ) , 0 , NULL , HFILL }
}
, {
& hf_dns_ds_digest_type , {
"Digest Type" , "dns.ds.digest_type" , FT_UINT8 , BASE_DEC , VALS ( dns_ds_digest_vals ) , 0 , NULL , HFILL }
}
, {
& hf_dns_ds_digest , {
"Digest" , "dns.ds.digest" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_apl_address_family , {
"Address Family" , "dns.apl.address_family" , FT_UINT16 , BASE_DEC , VALS ( afamily_vals ) , 0 , NULL , HFILL }
}
, {
& hf_dns_apl_coded_prefix , {
"Prefix Length" , "dns.apl.coded_prefix" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_apl_negation , {
"Negation Flag" , "dns.apl.negation" , FT_BOOLEAN , 8 , TFS ( & tfs_dns_apl_negation ) , DNS_APL_NEGATION , NULL , HFILL }
}
, {
& hf_dns_apl_afdlength , {
"Address Length" , "dns.apl.afdlength" , FT_UINT8 , BASE_DEC , NULL , DNS_APL_AFDLENGTH , "in octets" , HFILL }
}
, {
& hf_dns_apl_afdpart_ipv4 , {
"Address" , "dns.apl.afdpart.ipv4" , FT_IPv4 , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_apl_afdpart_ipv6 , {
"Address" , "dns.apl.afdpart.ipv6" , FT_IPv6 , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_apl_afdpart_data , {
"Address" , "dns.apl.afdpart.data" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_gpos_longitude_length , {
"Longitude length" , "dns.gpos.longitude_length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_gpos_longitude , {
"Longitude" , "dns.gpos.longitude" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_gpos_latitude_length , {
"Latitude length" , "dns.gpos.latitude_length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_gpos_latitude , {
"Latitude" , "dns.gpos.latitude" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_gpos_altitude_length , {
"Altitude length" , "dns.gpos.altitude_length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_gpos_altitude , {
"Altitude" , "dns.gpos.altitude" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_rp_mailbox , {
"Mailbox" , "dns.rp.mailbox" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_rp_txt_rr , {
"TXT RR" , "dns.rp.txt_rr" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_afsdb_subtype , {
"Subtype" , "dns.afsdb.subtype" , FT_UINT16 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_afsdb_hostname , {
"Hostname" , "dns.afsdb.hostname" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_x25_length , {
"Length" , "dns.x25.length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_x25_psdn_address , {
"PSDN-Address" , "dns.x25.psdn_address" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_isdn_length , {
"Length" , "dns.idsn.length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_isdn_address , {
"ISDN Address" , "dns.idsn.address" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_isdn_sa_length , {
"Length" , "dns.idsn.sa.length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_isdn_sa , {
"Sub Address" , "dns.idsn.sa.address" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_rt_preference , {
"Preference" , "dns.rt.subtype" , FT_UINT16 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_rt_intermediate_host , {
"Intermediate Hostname" , "dns.rt.intermediate_host" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_nsap_rdata , {
"NSAP Data" , "dns.nsap.rdata" , FT_BYTES , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_nsap_ptr_owner , {
"Owner" , "dns.nsap_ptr.owner" , FT_STRING , BASE_NONE , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_caa_flags , {
"CAA Flags" , "dns.caa.flags" , FT_UINT8 , BASE_HEX , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_caa_flag_issuer_critical , {
"Issuer Critical" , "dns.caa.flags.issuer_critical" , FT_BOOLEAN , 8 , TFS ( & tfs_critical_not_critical ) , CAA_FLAG_ISSUER_CRITICAL , "Other CAs must not issue certificates" , HFILL }
}
, {
& hf_dns_caa_issue , {
"Issue" , "dns.caa.issue" , FT_STRING , BASE_NONE , NULL , 0x0 , "CA which is allowed to issue certificates" , HFILL }
}
, {
& hf_dns_caa_issuewild , {
"Issue Wildcard" , "dns.caa.issuewild" , FT_STRING , BASE_NONE , NULL , 0x0 , "CA which is allowed to issue wildcard certificates" , HFILL }
}
, {
& hf_dns_caa_iodef , {
"Report URL" , "dns.caa.iodef" , FT_STRING , BASE_NONE , NULL , 0x0 , "URL or email address for certificate issue requests and violation reports" , HFILL }
}
, {
& hf_dns_caa_unknown , {
"Unkown tag" , "dns.caa.unknown" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_caa_tag_length , {
"Tag length" , "dns.caa.tag_length" , FT_UINT8 , BASE_DEC , NULL , 0 , NULL , HFILL }
}
, {
& hf_dns_caa_tag , {
"Tag" , "dns.caa.tag" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_caa_value , {
"Value" , "dns.caa.value" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_wins_local_flag , {
"Local Flag" , "dns.wins.local_flag" , FT_BOOLEAN , 32 , TFS ( & tfs_true_false ) , 0x1 , NULL , HFILL }
}
, {
& hf_dns_wins_lookup_timeout , {
"Lookup timeout" , "dns.wins.lookup_timeout" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "In seconds" , HFILL }
}
, {
& hf_dns_wins_cache_timeout , {
"Cache timeout" , "dns.wins.cache_timeout" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "In seconds" , HFILL }
}
, {
& hf_dns_wins_nb_wins_servers , {
"Number of WINS servers" , "dns.wins.nb_wins_servers" , FT_UINT32 , BASE_DEC , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_wins_server , {
"WINS Server Address" , "dns.wins.wins_server" , FT_IPv4 , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_winsr_local_flag , {
"Local Flag" , "dns.winsr.local_flag" , FT_BOOLEAN , 32 , TFS ( & tfs_true_false ) , 0x1 , NULL , HFILL }
}
, {
& hf_dns_winsr_lookup_timeout , {
"Lookup timeout" , "dns.winsr.lookup_timeout" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "In seconds" , HFILL }
}
, {
& hf_dns_winsr_cache_timeout , {
"Cache timeout" , "dns.winsr.cache_timeout" , FT_UINT32 , BASE_DEC , NULL , 0x0 , "In seconds" , HFILL }
}
, {
& hf_dns_winsr_name_result_domain , {
"Name Result Domain" , "dns.winsr.name_result_domain" , FT_STRING , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, {
& hf_dns_data , {
"Data" , "dns.data" , FT_BYTES , BASE_NONE , NULL , 0x0 , NULL , HFILL }
}
, }
;
static ei_register_info ei [ ] = {
{
& ei_dns_opt_bad_length , {
"dns.rr.opt.bad_length" , PI_MALFORMED , PI_ERROR , "Length too long for any type of IP address." , EXPFILL }
}
, {
& ei_dns_undecoded_option , {
"dns.undecoded.type" , PI_UNDECODED , PI_NOTE , "Undecoded option" , EXPFILL }
}
, {
& ei_dns_depr_opc , {
"dns.depr.opc" , PI_PROTOCOL , PI_WARN , "Deprecated opcode" , EXPFILL }
}
, {
& ei_ttl_negative , {
"dns.ttl.negative" , PI_PROTOCOL , PI_WARN , "TTL can't be negative" , EXPFILL }
}
, {
& ei_dns_tsig_alg , {
"dns.tsig.noalg" , PI_UNDECODED , PI_WARN , "No dissector for algorithm" , EXPFILL }
}
, }
;
static gint * ett [ ] = {
& ett_dns , & ett_dns_qd , & ett_dns_rr , & ett_dns_qry , & ett_dns_ans , & ett_dns_flags , & ett_dns_opts , & ett_nsec3_flags , & ett_key_flags , & ett_t_key , & ett_dns_mac , & ett_caa_flags , & ett_caa_data , }
;
module_t * dns_module ;
expert_module_t * expert_dns ;
proto_dns = proto_register_protocol ( "Domain Name Service" , "DNS" , "dns" ) ;
proto_register_field_array ( proto_dns , hf , array_length ( hf ) ) ;
proto_register_subtree_array ( ett , array_length ( ett ) ) ;
expert_dns = expert_register_protocol ( proto_dns ) ;
expert_register_field_array ( expert_dns , ei , array_length ( ei ) ) ;
range_convert_str ( & global_dns_tcp_port_range , DEFAULT_DNS_PORT_RANGE , MAX_TCP_PORT ) ;
range_convert_str ( & global_dns_udp_port_range , DEFAULT_DNS_PORT_RANGE , MAX_UDP_PORT ) ;
dns_module = prefs_register_protocol ( proto_dns , proto_reg_handoff_dns ) ;
prefs_register_range_preference ( dns_module , "tcp.ports" , "DNS TCP ports" , "TCP ports to be decoded as DNS (default: " DEFAULT_DNS_PORT_RANGE ")" , & global_dns_tcp_port_range , MAX_TCP_PORT ) ;
prefs_register_range_preference ( dns_module , "udp.ports" , "DNS UDP ports" , "UDP ports to be decoded as DNS (default: " DEFAULT_DNS_PORT_RANGE ")" , & global_dns_udp_port_range , MAX_UDP_PORT ) ;
prefs_register_bool_preference ( dns_module , "desegment_dns_messages" , "Reassemble DNS messages spanning multiple TCP segments" , "Whether the DNS dissector should reassemble messages spanning multiple TCP segments." " To use this option, you must also enable \"Allow subdissectors to reassemble TCP streams\" in the TCP protocol settings." , & dns_desegment ) ;
prefs_register_bool_preference ( dns_module , "use_for_addr_resolution" , "Use DNS packet data for address resolution" , "Whether addressame pairs found in dissected DNS packets should be used by Wireshark for name resolution." , & dns_use_for_addr_resolution ) ;
dns_tsig_dissector_table = register_dissector_table ( "dns.tsig.mac" , "DNS TSIG MAC Dissectors" , FT_STRING , BASE_NONE ) ;
dns_tap = register_tap ( "dns" ) ;
} | 1,387 |
0 | int PEM_write_ ## name ( FILE * fp , type * x ) ;
# define DECLARE_PEM_write_fp_const ( name , type ) int PEM_write_ ## name ( FILE * fp , const type * x ) ;
# define DECLARE_PEM_write_cb_fp ( name , type ) int PEM_write_ ## name ( FILE * fp , type * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cb , void * u ) ;
# endif # define DECLARE_PEM_read_bio ( name , type ) type * PEM_read_bio_ ## name ( BIO * bp , type * * x , pem_password_cb * cb , void * u ) ;
# define DECLARE_PEM_write_bio ( name , type ) int PEM_write_bio_ ## name ( BIO * bp , type * x ) ;
# define DECLARE_PEM_write_bio_const ( name , type ) int PEM_write_bio_ ## name ( BIO * bp , const type * x ) ;
# define DECLARE_PEM_write_cb_bio ( name , type ) int PEM_write_bio_ ## name ( BIO * bp , type * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cb , void * u ) ;
# define DECLARE_PEM_write ( name , type ) DECLARE_PEM_write_bio ( name , type ) DECLARE_PEM_write_fp ( name , type ) # define DECLARE_PEM_write_const ( name , type ) DECLARE_PEM_write_bio_const ( name , type ) DECLARE_PEM_write_fp_const ( name , type ) # define DECLARE_PEM_write_cb ( name , type ) DECLARE_PEM_write_cb_bio ( name , type ) DECLARE_PEM_write_cb_fp ( name , type ) # define DECLARE_PEM_read ( name , type ) DECLARE_PEM_read_bio ( name , type ) DECLARE_PEM_read_fp ( name , type ) # define DECLARE_PEM_rw ( name , type ) DECLARE_PEM_read ( name , type ) DECLARE_PEM_write ( name , type ) # define DECLARE_PEM_rw_const ( name , type ) DECLARE_PEM_read ( name , type ) DECLARE_PEM_write_const ( name , type ) # define DECLARE_PEM_rw_cb ( name , type ) DECLARE_PEM_read ( name , type ) DECLARE_PEM_write_cb ( name , type ) typedef int pem_password_cb ( char * buf , int size , int rwflag , void * userdata ) ;
int PEM_get_EVP_CIPHER_INFO ( char * header , EVP_CIPHER_INFO * cipher ) ;
int PEM_do_header ( EVP_CIPHER_INFO * cipher , unsigned char * data , long * len , pem_password_cb * callback , void * u ) ;
int PEM_read_bio ( BIO * bp , char * * name , char * * header , unsigned char * * data , long * len ) ;
# define PEM_FLAG_SECURE 0x1 # define PEM_FLAG_EAY_COMPATIBLE 0x2 # define PEM_FLAG_ONLY_B64 0x4 int PEM_read_bio_ex ( BIO * bp , char * * name , char * * header , unsigned char * * data , long * len , unsigned int flags ) ;
int PEM_bytes_read_bio_secmem ( unsigned char * * pdata , long * plen , char * * pnm , const char * name , BIO * bp , pem_password_cb * cb , void * u ) ;
int PEM_write_bio ( BIO * bp , const char * name , const char * hdr , const unsigned char * data , long len ) ;
int PEM_bytes_read_bio ( unsigned char * * pdata , long * plen , char * * pnm , const char * name , BIO * bp , pem_password_cb * cb , void * u ) ;
void * PEM_ASN1_read_bio ( d2i_of_void * d2i , const char * name , BIO * bp , void * * x , pem_password_cb * cb , void * u ) ;
int PEM_ASN1_write_bio ( i2d_of_void * i2d , const char * name , BIO * bp , void * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cb , void * u ) ;
STACK_OF ( X509_INFO ) * PEM_X509_INFO_read_bio ( BIO * bp , STACK_OF ( X509_INFO ) * sk , pem_password_cb * cb , void * u ) ;
int PEM_X509_INFO_write_bio ( BIO * bp , X509_INFO * xi , EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cd , void * u ) ;
# ifndef OPENSSL_NO_STDIO int PEM_read ( FILE * fp , char * * name , char * * header , unsigned char * * data , long * len ) ;
int PEM_write ( FILE * fp , const char * name , const char * hdr , const unsigned char * data , long len ) ;
void * PEM_ASN1_read ( d2i_of_void * d2i , const char * name , FILE * fp , void * * x , pem_password_cb * cb , void * u ) ;
int PEM_ASN1_write ( i2d_of_void * i2d , const char * name , FILE * fp , void * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * callback , void * u ) ;
STACK_OF ( X509_INFO ) * PEM_X509_INFO_read ( FILE * fp , STACK_OF ( X509_INFO ) * sk , pem_password_cb * cb , void * u ) ;
# endif int PEM_SignInit ( EVP_MD_CTX * ctx , EVP_MD * type ) ;
int PEM_SignUpdate ( EVP_MD_CTX * ctx , unsigned char * d , unsigned int cnt ) ;
int PEM_SignFinal ( EVP_MD_CTX * ctx , unsigned char * sigret , unsigned int * siglen , EVP_PKEY * pkey ) ;
int PEM_def_callback ( char * buf , int num , int rwflag , void * userdata ) ;
void PEM_proc_type ( char * buf , int type ) ;
void PEM_dek_info ( char * buf , const char * type , int len , char * str ) ;
# include < openssl / symhacks . h > DECLARE_PEM_rw ( X509 , X509 ) DECLARE_PEM_rw ( X509_AUX , X509 ) DECLARE_PEM_rw ( X509_REQ , X509_REQ ) DECLARE_PEM_write ( X509_REQ_NEW , X509_REQ ) DECLARE_PEM_rw ( X509_CRL , X509_CRL ) DECLARE_PEM_rw ( PKCS7 , PKCS7 ) DECLARE_PEM_rw ( NETSCAPE_CERT_SEQUENCE , NETSCAPE_CERT_SEQUENCE ) DECLARE_PEM_rw ( PKCS8 , X509_SIG ) DECLARE_PEM_rw ( PKCS8_PRIV_KEY_INFO , PKCS8_PRIV_KEY_INFO ) # ifndef OPENSSL_NO_RSA DECLARE_PEM_rw_cb ( RSAPrivateKey , RSA ) DECLARE_PEM_rw_const ( RSAPublicKey , RSA ) DECLARE_PEM_rw ( RSA_PUBKEY , RSA ) # endif # ifndef OPENSSL_NO_DSA DECLARE_PEM_rw_cb ( DSAPrivateKey , DSA ) DECLARE_PEM_rw ( DSA_PUBKEY , DSA ) DECLARE_PEM_rw_const ( DSAparams , DSA ) # endif # ifndef OPENSSL_NO_EC DECLARE_PEM_rw_const ( ECPKParameters , EC_GROUP ) DECLARE_PEM_rw_cb ( ECPrivateKey , EC_KEY ) DECLARE_PEM_rw ( EC_PUBKEY , EC_KEY ) | int PEM_write_ ## name ( FILE * fp , type * x ) ;
# define DECLARE_PEM_write_fp_const ( name , type ) int PEM_write_ ## name ( FILE * fp , const type * x ) ;
# define DECLARE_PEM_write_cb_fp ( name , type ) int PEM_write_ ## name ( FILE * fp , type * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cb , void * u ) ;
# endif # define DECLARE_PEM_read_bio ( name , type ) type * PEM_read_bio_ ## name ( BIO * bp , type * * x , pem_password_cb * cb , void * u ) ;
# define DECLARE_PEM_write_bio ( name , type ) int PEM_write_bio_ ## name ( BIO * bp , type * x ) ;
# define DECLARE_PEM_write_bio_const ( name , type ) int PEM_write_bio_ ## name ( BIO * bp , const type * x ) ;
# define DECLARE_PEM_write_cb_bio ( name , type ) int PEM_write_bio_ ## name ( BIO * bp , type * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cb , void * u ) ;
# define DECLARE_PEM_write ( name , type ) DECLARE_PEM_write_bio ( name , type ) DECLARE_PEM_write_fp ( name , type ) # define DECLARE_PEM_write_const ( name , type ) DECLARE_PEM_write_bio_const ( name , type ) DECLARE_PEM_write_fp_const ( name , type ) # define DECLARE_PEM_write_cb ( name , type ) DECLARE_PEM_write_cb_bio ( name , type ) DECLARE_PEM_write_cb_fp ( name , type ) # define DECLARE_PEM_read ( name , type ) DECLARE_PEM_read_bio ( name , type ) DECLARE_PEM_read_fp ( name , type ) # define DECLARE_PEM_rw ( name , type ) DECLARE_PEM_read ( name , type ) DECLARE_PEM_write ( name , type ) # define DECLARE_PEM_rw_const ( name , type ) DECLARE_PEM_read ( name , type ) DECLARE_PEM_write_const ( name , type ) # define DECLARE_PEM_rw_cb ( name , type ) DECLARE_PEM_read ( name , type ) DECLARE_PEM_write_cb ( name , type ) typedef int pem_password_cb ( char * buf , int size , int rwflag , void * userdata ) ;
int PEM_get_EVP_CIPHER_INFO ( char * header , EVP_CIPHER_INFO * cipher ) ;
int PEM_do_header ( EVP_CIPHER_INFO * cipher , unsigned char * data , long * len , pem_password_cb * callback , void * u ) ;
int PEM_read_bio ( BIO * bp , char * * name , char * * header , unsigned char * * data , long * len ) ;
# define PEM_FLAG_SECURE 0x1 # define PEM_FLAG_EAY_COMPATIBLE 0x2 # define PEM_FLAG_ONLY_B64 0x4 int PEM_read_bio_ex ( BIO * bp , char * * name , char * * header , unsigned char * * data , long * len , unsigned int flags ) ;
int PEM_bytes_read_bio_secmem ( unsigned char * * pdata , long * plen , char * * pnm , const char * name , BIO * bp , pem_password_cb * cb , void * u ) ;
int PEM_write_bio ( BIO * bp , const char * name , const char * hdr , const unsigned char * data , long len ) ;
int PEM_bytes_read_bio ( unsigned char * * pdata , long * plen , char * * pnm , const char * name , BIO * bp , pem_password_cb * cb , void * u ) ;
void * PEM_ASN1_read_bio ( d2i_of_void * d2i , const char * name , BIO * bp , void * * x , pem_password_cb * cb , void * u ) ;
int PEM_ASN1_write_bio ( i2d_of_void * i2d , const char * name , BIO * bp , void * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cb , void * u ) ;
STACK_OF ( X509_INFO ) * PEM_X509_INFO_read_bio ( BIO * bp , STACK_OF ( X509_INFO ) * sk , pem_password_cb * cb , void * u ) ;
int PEM_X509_INFO_write_bio ( BIO * bp , X509_INFO * xi , EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cd , void * u ) ;
# ifndef OPENSSL_NO_STDIO int PEM_read ( FILE * fp , char * * name , char * * header , unsigned char * * data , long * len ) ;
int PEM_write ( FILE * fp , const char * name , const char * hdr , const unsigned char * data , long len ) ;
void * PEM_ASN1_read ( d2i_of_void * d2i , const char * name , FILE * fp , void * * x , pem_password_cb * cb , void * u ) ;
int PEM_ASN1_write ( i2d_of_void * i2d , const char * name , FILE * fp , void * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * callback , void * u ) ;
STACK_OF ( X509_INFO ) * PEM_X509_INFO_read ( FILE * fp , STACK_OF ( X509_INFO ) * sk , pem_password_cb * cb , void * u ) ;
# endif int PEM_SignInit ( EVP_MD_CTX * ctx , EVP_MD * type ) ;
int PEM_SignUpdate ( EVP_MD_CTX * ctx , unsigned char * d , unsigned int cnt ) ;
int PEM_SignFinal ( EVP_MD_CTX * ctx , unsigned char * sigret , unsigned int * siglen , EVP_PKEY * pkey ) ;
int PEM_def_callback ( char * buf , int num , int rwflag , void * userdata ) ;
void PEM_proc_type ( char * buf , int type ) ;
void PEM_dek_info ( char * buf , const char * type , int len , char * str ) ;
# include < openssl / symhacks . h > DECLARE_PEM_rw ( X509 , X509 ) DECLARE_PEM_rw ( X509_AUX , X509 ) DECLARE_PEM_rw ( X509_REQ , X509_REQ ) DECLARE_PEM_write ( X509_REQ_NEW , X509_REQ ) DECLARE_PEM_rw ( X509_CRL , X509_CRL ) DECLARE_PEM_rw ( PKCS7 , PKCS7 ) DECLARE_PEM_rw ( NETSCAPE_CERT_SEQUENCE , NETSCAPE_CERT_SEQUENCE ) DECLARE_PEM_rw ( PKCS8 , X509_SIG ) DECLARE_PEM_rw ( PKCS8_PRIV_KEY_INFO , PKCS8_PRIV_KEY_INFO ) # ifndef OPENSSL_NO_RSA DECLARE_PEM_rw_cb ( RSAPrivateKey , RSA ) DECLARE_PEM_rw_const ( RSAPublicKey , RSA ) DECLARE_PEM_rw ( RSA_PUBKEY , RSA ) # endif # ifndef OPENSSL_NO_DSA DECLARE_PEM_rw_cb ( DSAPrivateKey , DSA ) DECLARE_PEM_rw ( DSA_PUBKEY , DSA ) DECLARE_PEM_rw_const ( DSAparams , DSA ) # endif # ifndef OPENSSL_NO_EC DECLARE_PEM_rw_const ( ECPKParameters , EC_GROUP ) DECLARE_PEM_rw_cb ( ECPrivateKey , EC_KEY ) DECLARE_PEM_rw ( EC_PUBKEY , EC_KEY ) | 1,388 |
0 | bool isKeyInvalid(const String &key) {
// T39154441 - check if invalid chars exist
return key.find('\0') != -1;
} | bool isKeyInvalid(const String &key) {
return key.find('\0') != -1;
} | 1,389 |
1 | void cpu_exec_init(CPUArchState *env) { CPUState *cpu = ENV_GET_CPU(env); CPUClass *cc = CPU_GET_CLASS(cpu); CPUState *some_cpu; int cpu_index; #if defined(CONFIG_USER_ONLY) cpu_list_lock(); #endif cpu_index = 0; CPU_FOREACH(some_cpu) { cpu_index++; } cpu->cpu_index = cpu_index; cpu->numa_node = 0; QTAILQ_INIT(&cpu->breakpoints); QTAILQ_INIT(&cpu->watchpoints); #ifndef CONFIG_USER_ONLY cpu->as = &address_space_memory; cpu->thread_id = qemu_get_thread_id(); #endif QTAILQ_INSERT_TAIL(&cpus, cpu, node); #if defined(CONFIG_USER_ONLY) cpu_list_unlock(); #endif if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu); } #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION, cpu_save, cpu_load, env); assert(cc->vmsd == NULL); assert(qdev_get_vmsd(DEVICE(cpu)) == NULL); #endif if (cc->vmsd != NULL) { vmstate_register(NULL, cpu_index, cc->vmsd, cpu); } } | void cpu_exec_init(CPUArchState *env) { CPUState *cpu = ENV_GET_CPU(env); CPUClass *cc = CPU_GET_CLASS(cpu); CPUState *some_cpu; int cpu_index; #if defined(CONFIG_USER_ONLY) cpu_list_lock(); #endif cpu_index = 0; CPU_FOREACH(some_cpu) { cpu_index++; } cpu->cpu_index = cpu_index; cpu->numa_node = 0; QTAILQ_INIT(&cpu->breakpoints); QTAILQ_INIT(&cpu->watchpoints); #ifndef CONFIG_USER_ONLY cpu->as = &address_space_memory; cpu->thread_id = qemu_get_thread_id(); #endif QTAILQ_INSERT_TAIL(&cpus, cpu, node); #if defined(CONFIG_USER_ONLY) cpu_list_unlock(); #endif if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu); } #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION, cpu_save, cpu_load, env); assert(cc->vmsd == NULL); assert(qdev_get_vmsd(DEVICE(cpu)) == NULL); #endif if (cc->vmsd != NULL) { vmstate_register(NULL, cpu_index, cc->vmsd, cpu); } } | 1,390 |
1 | long video_ioctl2(struct file *file,
unsigned int cmd, unsigned long arg)
{
char sbuf[128];
void *mbuf = NULL;
void *parg = (void *)arg;
long err = -EINVAL;
bool has_array_args;
size_t array_size = 0;
void __user *user_ptr = NULL;
void **kernel_ptr = NULL;
/* Copy arguments into temp kernel buffer */
if (_IOC_DIR(cmd) != _IOC_NONE) {
if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
parg = sbuf;
} else {
/* too big to allocate from stack */
mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
}
err = -EFAULT;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
unsigned long n = cmd_input_size(cmd);
if (copy_from_user(parg, (void __user *)arg, n))
goto out;
/* zero out anything we don't copy from userspace */
if (n < _IOC_SIZE(cmd))
memset((u8 *)parg + n, 0, _IOC_SIZE(cmd) - n);
} else {
/* read-only ioctl */
memset(parg, 0, _IOC_SIZE(cmd));
}
}
err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
if (err < 0)
goto out;
has_array_args = err;
if (has_array_args) {
/*
* When adding new types of array args, make sure that the
* parent argument to ioctl (which contains the pointer to the
* array) fits into sbuf (so that mbuf will still remain
* unused up to here).
*/
mbuf = kmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
if (NULL == mbuf)
goto out_array_args;
err = -EFAULT;
if (copy_from_user(mbuf, user_ptr, array_size))
goto out_array_args;
*kernel_ptr = mbuf;
}
/* Handles IOCTL */
err = __video_do_ioctl(file, cmd, parg);
if (err == -ENOIOCTLCMD)
err = -EINVAL;
if (has_array_args) {
*kernel_ptr = user_ptr;
if (copy_to_user(user_ptr, mbuf, array_size))
err = -EFAULT;
goto out_array_args;
}
if (err < 0)
goto out;
out_array_args:
/* Copy results into user buffer */
switch (_IOC_DIR(cmd)) {
case _IOC_READ:
case (_IOC_WRITE | _IOC_READ):
if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
err = -EFAULT;
break;
}
out:
kfree(mbuf);
return err;
} | long video_ioctl2(struct file *file,
unsigned int cmd, unsigned long arg)
{
char sbuf[128];
void *mbuf = NULL;
void *parg = (void *)arg;
long err = -EINVAL;
bool has_array_args;
size_t array_size = 0;
void __user *user_ptr = NULL;
void **kernel_ptr = NULL;
if (_IOC_DIR(cmd) != _IOC_NONE) {
if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
parg = sbuf;
} else {
mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
}
err = -EFAULT;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
unsigned long n = cmd_input_size(cmd);
if (copy_from_user(parg, (void __user *)arg, n))
goto out;
if (n < _IOC_SIZE(cmd))
memset((u8 *)parg + n, 0, _IOC_SIZE(cmd) - n);
} else {
memset(parg, 0, _IOC_SIZE(cmd));
}
}
err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
if (err < 0)
goto out;
has_array_args = err;
if (has_array_args) {
mbuf = kmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
if (NULL == mbuf)
goto out_array_args;
err = -EFAULT;
if (copy_from_user(mbuf, user_ptr, array_size))
goto out_array_args;
*kernel_ptr = mbuf;
}
err = __video_do_ioctl(file, cmd, parg);
if (err == -ENOIOCTLCMD)
err = -EINVAL;
if (has_array_args) {
*kernel_ptr = user_ptr;
if (copy_to_user(user_ptr, mbuf, array_size))
err = -EFAULT;
goto out_array_args;
}
if (err < 0)
goto out;
out_array_args:
switch (_IOC_DIR(cmd)) {
case _IOC_READ:
case (_IOC_WRITE | _IOC_READ):
if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
err = -EFAULT;
break;
}
out:
kfree(mbuf);
return err;
} | 1,391 |
1 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
int mmu_reset_needed = 0;
int i, pending_vec, max_bits;
struct descriptor_table dt;
vcpu_load(vcpu);
dt.limit = sregs->idt.limit;
dt.base = sregs->idt.base;
kvm_x86_ops->set_idt(vcpu, &dt);
dt.limit = sregs->gdt.limit;
dt.base = sregs->gdt.base;
kvm_x86_ops->set_gdt(vcpu, &dt);
vcpu->arch.cr2 = sregs->cr2;
mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
vcpu->arch.cr3 = sregs->cr3;
kvm_set_cr8(vcpu, sregs->cr8);
mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
kvm_x86_ops->set_efer(vcpu, sregs->efer);
kvm_set_apic_base(vcpu, sregs->apic_base);
kvm_x86_ops->decache_cr4_guest_bits(vcpu);
mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;
mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
if (!is_long_mode(vcpu) && is_pae(vcpu))
load_pdptrs(vcpu, vcpu->arch.cr3);
if (mmu_reset_needed)
kvm_mmu_reset_context(vcpu);
if (!irqchip_in_kernel(vcpu->kvm)) {
memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
sizeof vcpu->arch.irq_pending);
vcpu->arch.irq_summary = 0;
for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
if (vcpu->arch.irq_pending[i])
__set_bit(i, &vcpu->arch.irq_summary);
} else {
max_bits = (sizeof sregs->interrupt_bitmap) << 3;
pending_vec = find_first_bit(
(const unsigned long *)sregs->interrupt_bitmap,
max_bits);
/* Only pending external irq is handled here */
if (pending_vec < max_bits) {
kvm_x86_ops->set_irq(vcpu, pending_vec);
pr_debug("Set back pending irq %d\n",
pending_vec);
}
kvm_pic_clear_isr_ack(vcpu->kvm);
}
kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
/* Older userspace won't unhalt the vcpu on reset. */
if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
!(vcpu->arch.cr0 & X86_CR0_PE))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
vcpu_put(vcpu);
return 0;
} | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
int mmu_reset_needed = 0;
int i, pending_vec, max_bits;
struct descriptor_table dt;
vcpu_load(vcpu);
dt.limit = sregs->idt.limit;
dt.base = sregs->idt.base;
kvm_x86_ops->set_idt(vcpu, &dt);
dt.limit = sregs->gdt.limit;
dt.base = sregs->gdt.base;
kvm_x86_ops->set_gdt(vcpu, &dt);
vcpu->arch.cr2 = sregs->cr2;
mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
vcpu->arch.cr3 = sregs->cr3;
kvm_set_cr8(vcpu, sregs->cr8);
mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
kvm_x86_ops->set_efer(vcpu, sregs->efer);
kvm_set_apic_base(vcpu, sregs->apic_base);
kvm_x86_ops->decache_cr4_guest_bits(vcpu);
mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;
mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
if (!is_long_mode(vcpu) && is_pae(vcpu))
load_pdptrs(vcpu, vcpu->arch.cr3);
if (mmu_reset_needed)
kvm_mmu_reset_context(vcpu);
if (!irqchip_in_kernel(vcpu->kvm)) {
memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
sizeof vcpu->arch.irq_pending);
vcpu->arch.irq_summary = 0;
for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
if (vcpu->arch.irq_pending[i])
__set_bit(i, &vcpu->arch.irq_summary);
} else {
max_bits = (sizeof sregs->interrupt_bitmap) << 3;
pending_vec = find_first_bit(
(const unsigned long *)sregs->interrupt_bitmap,
max_bits);
if (pending_vec < max_bits) {
kvm_x86_ops->set_irq(vcpu, pending_vec);
pr_debug("Set back pending irq %d\n",
pending_vec);
}
kvm_pic_clear_isr_ack(vcpu->kvm);
}
kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
!(vcpu->arch.cr0 & X86_CR0_PE))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
vcpu_put(vcpu);
return 0;
} | 1,392 |
0 | static Variant _php_mb_regex_ereg_replace_exec(const Variant& pattern,
const String& replacement,
const String& str,
const String& option,
OnigOptionType options) {
const char *p;
php_mb_regex_t *re;
OnigSyntaxType *syntax;
OnigRegion *regs = nullptr;
StringBuffer out_buf;
int i, err, eval, n;
OnigUChar *pos;
OnigUChar *string_lim;
char pat_buf[2];
const mbfl_encoding *enc;
{
const char *current_enc_name;
current_enc_name = php_mb_regex_mbctype2name(MBSTRG(current_mbctype));
if (current_enc_name == nullptr ||
(enc = mbfl_name2encoding(current_enc_name)) == nullptr) {
raise_warning("Unknown error");
return false;
}
}
eval = 0;
{
if (!option.empty()) {
_php_mb_regex_init_options(option.data(), option.size(),
&options, &syntax, &eval);
} else {
options |= MBSTRG(regex_default_options);
syntax = MBSTRG(regex_default_syntax);
}
}
String spattern;
if (pattern.isString()) {
spattern = pattern.toString();
} else {
/* FIXME: this code is not multibyte aware! */
pat_buf[0] = pattern.toByte();
pat_buf[1] = '\0';
spattern = String(pat_buf, 1, CopyString);
}
/* create regex pattern buffer */
re = php_mbregex_compile_pattern(spattern, options,
MBSTRG(current_mbctype), syntax);
if (re == nullptr) {
return false;
}
if (eval) {
throw_not_supported("ereg_replace", "dynamic coding");
}
/* do the actual work */
err = 0;
pos = (OnigUChar*)str.data();
string_lim = (OnigUChar*)(str.data() + str.size());
regs = onig_region_new();
while (err >= 0) {
err = onig_search(re, (OnigUChar *)str.data(), (OnigUChar *)string_lim,
pos, (OnigUChar *)string_lim, regs, 0);
if (err <= -2) {
OnigUChar err_str[ONIG_MAX_ERROR_MESSAGE_LEN];
onig_error_code_to_str(err_str, err);
raise_warning("mbregex search failure: %s", err_str);
break;
}
if (err >= 0) {
#if moriyoshi_0
if (regs->beg[0] == regs->end[0]) {
raise_warning("Empty regular expression");
break;
}
#endif
/* copy the part of the string before the match */
out_buf.append((const char *)pos,
(OnigUChar *)(str.data() + regs->beg[0]) - pos);
/* copy replacement and backrefs */
i = 0;
p = replacement.data();
while (i < replacement.size()) {
int fwd = (int)php_mb_mbchar_bytes_ex(p, enc);
n = -1;
auto const remaining = replacement.size() - i;
if (remaining >= 2 && fwd == 1 &&
p[0] == '\\' && p[1] >= '0' && p[1] <= '9') {
n = p[1] - '0';
}
if (n >= 0 && n < regs->num_regs) {
if (regs->beg[n] >= 0 && regs->beg[n] < regs->end[n] &&
regs->end[n] <= str.size()) {
out_buf.append(str.data() + regs->beg[n],
regs->end[n] - regs->beg[n]);
}
p += 2;
i += 2;
} else if (remaining >= fwd) {
out_buf.append(p, fwd);
p += fwd;
i += fwd;
} else {
raise_warning("Replacement ends with unterminated %s: 0x%hhx",
enc->name, *p);
break;
}
}
n = regs->end[0];
if ((pos - (OnigUChar *)str.data()) < n) {
pos = (OnigUChar *)(str.data() + n);
} else {
if (pos < string_lim) {
out_buf.append((const char *)pos, 1);
}
pos++;
}
} else { /* nomatch */
/* stick that last bit of string on our output */
if (string_lim - pos > 0) {
out_buf.append((const char *)pos, string_lim - pos);
}
}
onig_region_free(regs, 0);
}
if (regs != nullptr) {
onig_region_free(regs, 1);
}
if (err <= -2) {
return false;
}
return out_buf.detach();
} | static Variant _php_mb_regex_ereg_replace_exec(const Variant& pattern,
const String& replacement,
const String& str,
const String& option,
OnigOptionType options) {
const char *p;
php_mb_regex_t *re;
OnigSyntaxType *syntax;
OnigRegion *regs = nullptr;
StringBuffer out_buf;
int i, err, eval, n;
OnigUChar *pos;
OnigUChar *string_lim;
char pat_buf[2];
const mbfl_encoding *enc;
{
const char *current_enc_name;
current_enc_name = php_mb_regex_mbctype2name(MBSTRG(current_mbctype));
if (current_enc_name == nullptr ||
(enc = mbfl_name2encoding(current_enc_name)) == nullptr) {
raise_warning("Unknown error");
return false;
}
}
eval = 0;
{
if (!option.empty()) {
_php_mb_regex_init_options(option.data(), option.size(),
&options, &syntax, &eval);
} else {
options |= MBSTRG(regex_default_options);
syntax = MBSTRG(regex_default_syntax);
}
}
String spattern;
if (pattern.isString()) {
spattern = pattern.toString();
} else {
pat_buf[0] = pattern.toByte();
pat_buf[1] = '\0';
spattern = String(pat_buf, 1, CopyString);
}
re = php_mbregex_compile_pattern(spattern, options,
MBSTRG(current_mbctype), syntax);
if (re == nullptr) {
return false;
}
if (eval) {
throw_not_supported("ereg_replace", "dynamic coding");
}
err = 0;
pos = (OnigUChar*)str.data();
string_lim = (OnigUChar*)(str.data() + str.size());
regs = onig_region_new();
while (err >= 0) {
err = onig_search(re, (OnigUChar *)str.data(), (OnigUChar *)string_lim,
pos, (OnigUChar *)string_lim, regs, 0);
if (err <= -2) {
OnigUChar err_str[ONIG_MAX_ERROR_MESSAGE_LEN];
onig_error_code_to_str(err_str, err);
raise_warning("mbregex search failure: %s", err_str);
break;
}
if (err >= 0) {
#if moriyoshi_0
if (regs->beg[0] == regs->end[0]) {
raise_warning("Empty regular expression");
break;
}
#endif
out_buf.append((const char *)pos,
(OnigUChar *)(str.data() + regs->beg[0]) - pos);
i = 0;
p = replacement.data();
while (i < replacement.size()) {
int fwd = (int)php_mb_mbchar_bytes_ex(p, enc);
n = -1;
auto const remaining = replacement.size() - i;
if (remaining >= 2 && fwd == 1 &&
p[0] == '\\' && p[1] >= '0' && p[1] <= '9') {
n = p[1] - '0';
}
if (n >= 0 && n < regs->num_regs) {
if (regs->beg[n] >= 0 && regs->beg[n] < regs->end[n] &&
regs->end[n] <= str.size()) {
out_buf.append(str.data() + regs->beg[n],
regs->end[n] - regs->beg[n]);
}
p += 2;
i += 2;
} else if (remaining >= fwd) {
out_buf.append(p, fwd);
p += fwd;
i += fwd;
} else {
raise_warning("Replacement ends with unterminated %s: 0x%hhx",
enc->name, *p);
break;
}
}
n = regs->end[0];
if ((pos - (OnigUChar *)str.data()) < n) {
pos = (OnigUChar *)(str.data() + n);
} else {
if (pos < string_lim) {
out_buf.append((const char *)pos, 1);
}
pos++;
}
} else {
if (string_lim - pos > 0) {
out_buf.append((const char *)pos, string_lim - pos);
}
}
onig_region_free(regs, 0);
}
if (regs != nullptr) {
onig_region_free(regs, 1);
}
if (err <= -2) {
return false;
}
return out_buf.detach();
} | 1,393 |
0 | static void test_bug28386 ( ) {
int rc ;
MYSQL_STMT * stmt ;
MYSQL_RES * result ;
MYSQL_ROW row ;
MYSQL_BIND bind ;
const char hello [ ] = "hello world!" ;
DBUG_ENTER ( "test_bug28386" ) ;
myheader ( "test_bug28386" ) ;
rc = mysql_query ( mysql , "select @@global.log_output" ) ;
myquery ( rc ) ;
result = mysql_store_result ( mysql ) ;
DIE_UNLESS ( result ) ;
row = mysql_fetch_row ( result ) ;
if ( ! strstr ( row [ 0 ] , "TABLE" ) ) {
mysql_free_result ( result ) ;
if ( ! opt_silent ) printf ( "Skipping the test since logging to tables is not enabled\n" ) ;
return ;
}
mysql_free_result ( result ) ;
enable_query_logs ( 1 ) ;
stmt = mysql_simple_prepare ( mysql , "SELECT ?" ) ;
check_stmt ( stmt ) ;
memset ( & bind , 0 , sizeof ( bind ) ) ;
bind . buffer_type = MYSQL_TYPE_STRING ;
bind . buffer = ( void * ) hello ;
bind . buffer_length = sizeof ( hello ) ;
mysql_stmt_bind_param ( stmt , & bind ) ;
mysql_stmt_send_long_data ( stmt , 0 , hello , sizeof ( hello ) ) ;
rc = mysql_stmt_execute ( stmt ) ;
check_execute ( stmt , rc ) ;
rc = my_process_stmt_result ( stmt ) ;
DIE_UNLESS ( rc == 1 ) ;
rc = mysql_stmt_reset ( stmt ) ;
check_execute ( stmt , rc ) ;
rc = mysql_stmt_close ( stmt ) ;
DIE_UNLESS ( ! rc ) ;
rc = mysql_query ( mysql , "select * from mysql.general_log where " "command_type='Close stmt' or " "command_type='Reset stmt' or " "command_type='Long Data'" ) ;
myquery ( rc ) ;
result = mysql_store_result ( mysql ) ;
mytest ( result ) ;
DIE_UNLESS ( mysql_num_rows ( result ) == 3 ) ;
mysql_free_result ( result ) ;
restore_query_logs ( ) ;
DBUG_VOID_RETURN ;
} | static void test_bug28386 ( ) {
int rc ;
MYSQL_STMT * stmt ;
MYSQL_RES * result ;
MYSQL_ROW row ;
MYSQL_BIND bind ;
const char hello [ ] = "hello world!" ;
DBUG_ENTER ( "test_bug28386" ) ;
myheader ( "test_bug28386" ) ;
rc = mysql_query ( mysql , "select @@global.log_output" ) ;
myquery ( rc ) ;
result = mysql_store_result ( mysql ) ;
DIE_UNLESS ( result ) ;
row = mysql_fetch_row ( result ) ;
if ( ! strstr ( row [ 0 ] , "TABLE" ) ) {
mysql_free_result ( result ) ;
if ( ! opt_silent ) printf ( "Skipping the test since logging to tables is not enabled\n" ) ;
return ;
}
mysql_free_result ( result ) ;
enable_query_logs ( 1 ) ;
stmt = mysql_simple_prepare ( mysql , "SELECT ?" ) ;
check_stmt ( stmt ) ;
memset ( & bind , 0 , sizeof ( bind ) ) ;
bind . buffer_type = MYSQL_TYPE_STRING ;
bind . buffer = ( void * ) hello ;
bind . buffer_length = sizeof ( hello ) ;
mysql_stmt_bind_param ( stmt , & bind ) ;
mysql_stmt_send_long_data ( stmt , 0 , hello , sizeof ( hello ) ) ;
rc = mysql_stmt_execute ( stmt ) ;
check_execute ( stmt , rc ) ;
rc = my_process_stmt_result ( stmt ) ;
DIE_UNLESS ( rc == 1 ) ;
rc = mysql_stmt_reset ( stmt ) ;
check_execute ( stmt , rc ) ;
rc = mysql_stmt_close ( stmt ) ;
DIE_UNLESS ( ! rc ) ;
rc = mysql_query ( mysql , "select * from mysql.general_log where " "command_type='Close stmt' or " "command_type='Reset stmt' or " "command_type='Long Data'" ) ;
myquery ( rc ) ;
result = mysql_store_result ( mysql ) ;
mytest ( result ) ;
DIE_UNLESS ( mysql_num_rows ( result ) == 3 ) ;
mysql_free_result ( result ) ;
restore_query_logs ( ) ;
DBUG_VOID_RETURN ;
} | 1,394 |
1 | parse_tag_11_packet(unsigned char *data, unsigned char *contents,
size_t max_contents_bytes, size_t *tag_11_contents_size,
size_t *packet_size, size_t max_packet_size)
{
size_t body_size;
size_t length_size;
int rc = 0;
(*packet_size) = 0;
(*tag_11_contents_size) = 0;
/* This format is inspired by OpenPGP; see RFC 2440
* packet tag 11
*
* Tag 11 identifier (1 byte)
* Max Tag 11 packet size (max 3 bytes)
* Binary format specifier (1 byte)
* Filename length (1 byte)
* Filename ("_CONSOLE") (8 bytes)
* Modification date (4 bytes)
* Literal data (arbitrary)
*
* We need at least 16 bytes of data for the packet to even be
* valid.
*/
if (max_packet_size < 16) {
printk(KERN_ERR "Maximum packet size too small\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != ECRYPTFS_TAG_11_PACKET_TYPE) {
printk(KERN_WARNING "Invalid tag 11 packet format\n");
rc = -EINVAL;
goto out;
}
rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
&length_size);
if (rc) {
printk(KERN_WARNING "Invalid tag 11 packet format\n");
goto out;
}
if (body_size < 14) {
printk(KERN_WARNING "Invalid body size ([%td])\n", body_size);
rc = -EINVAL;
goto out;
}
(*packet_size) += length_size;
(*tag_11_contents_size) = (body_size - 14);
if (unlikely((*packet_size) + body_size + 1 > max_packet_size)) {
printk(KERN_ERR "Packet size exceeds max\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != 0x62) {
printk(KERN_WARNING "Unrecognizable packet\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != 0x08) {
printk(KERN_WARNING "Unrecognizable packet\n");
rc = -EINVAL;
goto out;
}
(*packet_size) += 12; /* Ignore filename and modification date */
memcpy(contents, &data[(*packet_size)], (*tag_11_contents_size));
(*packet_size) += (*tag_11_contents_size);
out:
if (rc) {
(*packet_size) = 0;
(*tag_11_contents_size) = 0;
}
return rc;
} | parse_tag_11_packet(unsigned char *data, unsigned char *contents,
size_t max_contents_bytes, size_t *tag_11_contents_size,
size_t *packet_size, size_t max_packet_size)
{
size_t body_size;
size_t length_size;
int rc = 0;
(*packet_size) = 0;
(*tag_11_contents_size) = 0;
if (max_packet_size < 16) {
printk(KERN_ERR "Maximum packet size too small\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != ECRYPTFS_TAG_11_PACKET_TYPE) {
printk(KERN_WARNING "Invalid tag 11 packet format\n");
rc = -EINVAL;
goto out;
}
rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
&length_size);
if (rc) {
printk(KERN_WARNING "Invalid tag 11 packet format\n");
goto out;
}
if (body_size < 14) {
printk(KERN_WARNING "Invalid body size ([%td])\n", body_size);
rc = -EINVAL;
goto out;
}
(*packet_size) += length_size;
(*tag_11_contents_size) = (body_size - 14);
if (unlikely((*packet_size) + body_size + 1 > max_packet_size)) {
printk(KERN_ERR "Packet size exceeds max\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != 0x62) {
printk(KERN_WARNING "Unrecognizable packet\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != 0x08) {
printk(KERN_WARNING "Unrecognizable packet\n");
rc = -EINVAL;
goto out;
}
(*packet_size) += 12;
memcpy(contents, &data[(*packet_size)], (*tag_11_contents_size));
(*packet_size) += (*tag_11_contents_size);
out:
if (rc) {
(*packet_size) = 0;
(*tag_11_contents_size) = 0;
}
return rc;
} | 1,395 |
1 | static int vhost_set_vring_file(struct vhost_dev *dev, VhostUserRequest request, struct vhost_vring_file *file) { int fds[VHOST_MEMORY_MAX_NREGIONS]; size_t fd_num = 0; VhostUserMsg msg = { .request = request, .flags = VHOST_USER_VERSION, .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK, .size = sizeof(msg.payload.u64), }; if (ioeventfd_enabled() && file->fd > 0) { fds[fd_num++] = file->fd; } else { msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK; } vhost_user_write(dev, &msg, fds, fd_num); return 0; } | static int vhost_set_vring_file(struct vhost_dev *dev, VhostUserRequest request, struct vhost_vring_file *file) { int fds[VHOST_MEMORY_MAX_NREGIONS]; size_t fd_num = 0; VhostUserMsg msg = { .request = request, .flags = VHOST_USER_VERSION, .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK, .size = sizeof(msg.payload.u64), }; if (ioeventfd_enabled() && file->fd > 0) { fds[fd_num++] = file->fd; } else { msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK; } vhost_user_write(dev, &msg, fds, fd_num); return 0; } | 1,396 |
0 | long video_ioctl2(struct file *file,
unsigned int cmd, unsigned long arg)
{
return video_usercopy(file, cmd, arg, __video_do_ioctl);
} | long video_ioctl2(struct file *file,
unsigned int cmd, unsigned long arg)
{
return video_usercopy(file, cmd, arg, __video_do_ioctl);
} | 1,397 |
1 | parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
unsigned char *data, struct list_head *auth_tok_list,
struct ecryptfs_auth_tok **new_auth_tok,
size_t *packet_size, size_t max_packet_size)
{
size_t body_size;
struct ecryptfs_auth_tok_list_item *auth_tok_list_item;
size_t length_size;
int rc = 0;
(*packet_size) = 0;
(*new_auth_tok) = NULL;
/**
*This format is inspired by OpenPGP; see RFC 2440
* packet tag 3
*
* Tag 3 identifier (1 byte)
* Max Tag 3 packet size (max 3 bytes)
* Version (1 byte)
* Cipher code (1 byte)
* S2K specifier (1 byte)
* Hash identifier (1 byte)
* Salt (ECRYPTFS_SALT_SIZE)
* Hash iterations (1 byte)
* Encrypted key (arbitrary)
*
* (ECRYPTFS_SALT_SIZE + 7) minimum packet size
*/
if (max_packet_size < (ECRYPTFS_SALT_SIZE + 7)) {
printk(KERN_ERR "Max packet size too large\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != ECRYPTFS_TAG_3_PACKET_TYPE) {
printk(KERN_ERR "First byte != 0x%.2x; invalid packet\n",
ECRYPTFS_TAG_3_PACKET_TYPE);
rc = -EINVAL;
goto out;
}
/* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
* at end of function upon failure */
auth_tok_list_item =
kmem_cache_zalloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
if (!auth_tok_list_item) {
printk(KERN_ERR "Unable to allocate memory\n");
rc = -ENOMEM;
goto out;
}
(*new_auth_tok) = &auth_tok_list_item->auth_tok;
rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
&length_size);
if (rc) {
printk(KERN_WARNING "Error parsing packet length; rc = [%d]\n",
rc);
goto out_free;
}
if (unlikely(body_size < (ECRYPTFS_SALT_SIZE + 5))) {
printk(KERN_WARNING "Invalid body size ([%td])\n", body_size);
rc = -EINVAL;
goto out_free;
}
(*packet_size) += length_size;
if (unlikely((*packet_size) + body_size > max_packet_size)) {
printk(KERN_ERR "Packet size exceeds max\n");
rc = -EINVAL;
goto out_free;
}
(*new_auth_tok)->session_key.encrypted_key_size =
(body_size - (ECRYPTFS_SALT_SIZE + 5));
if (unlikely(data[(*packet_size)++] != 0x04)) {
printk(KERN_WARNING "Unknown version number [%d]\n",
data[(*packet_size) - 1]);
rc = -EINVAL;
goto out_free;
}
ecryptfs_cipher_code_to_string(crypt_stat->cipher,
(u16)data[(*packet_size)]);
/* A little extra work to differentiate among the AES key
* sizes; see RFC2440 */
switch(data[(*packet_size)++]) {
case RFC2440_CIPHER_AES_192:
crypt_stat->key_size = 24;
break;
default:
crypt_stat->key_size =
(*new_auth_tok)->session_key.encrypted_key_size;
}
ecryptfs_init_crypt_ctx(crypt_stat);
if (unlikely(data[(*packet_size)++] != 0x03)) {
printk(KERN_WARNING "Only S2K ID 3 is currently supported\n");
rc = -ENOSYS;
goto out_free;
}
/* TODO: finish the hash mapping */
switch (data[(*packet_size)++]) {
case 0x01: /* See RFC2440 for these numbers and their mappings */
/* Choose MD5 */
memcpy((*new_auth_tok)->token.password.salt,
&data[(*packet_size)], ECRYPTFS_SALT_SIZE);
(*packet_size) += ECRYPTFS_SALT_SIZE;
/* This conversion was taken straight from RFC2440 */
(*new_auth_tok)->token.password.hash_iterations =
((u32) 16 + (data[(*packet_size)] & 15))
<< ((data[(*packet_size)] >> 4) + 6);
(*packet_size)++;
/* Friendly reminder:
* (*new_auth_tok)->session_key.encrypted_key_size =
* (body_size - (ECRYPTFS_SALT_SIZE + 5)); */
memcpy((*new_auth_tok)->session_key.encrypted_key,
&data[(*packet_size)],
(*new_auth_tok)->session_key.encrypted_key_size);
(*packet_size) +=
(*new_auth_tok)->session_key.encrypted_key_size;
(*new_auth_tok)->session_key.flags &=
~ECRYPTFS_CONTAINS_DECRYPTED_KEY;
(*new_auth_tok)->session_key.flags |=
ECRYPTFS_CONTAINS_ENCRYPTED_KEY;
(*new_auth_tok)->token.password.hash_algo = 0x01; /* MD5 */
break;
default:
ecryptfs_printk(KERN_ERR, "Unsupported hash algorithm: "
"[%d]\n", data[(*packet_size) - 1]);
rc = -ENOSYS;
goto out_free;
}
(*new_auth_tok)->token_type = ECRYPTFS_PASSWORD;
/* TODO: Parametarize; we might actually want userspace to
* decrypt the session key. */
(*new_auth_tok)->session_key.flags &=
~(ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT);
(*new_auth_tok)->session_key.flags &=
~(ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT);
list_add(&auth_tok_list_item->list, auth_tok_list);
goto out;
out_free:
(*new_auth_tok) = NULL;
memset(auth_tok_list_item, 0,
sizeof(struct ecryptfs_auth_tok_list_item));
kmem_cache_free(ecryptfs_auth_tok_list_item_cache,
auth_tok_list_item);
out:
if (rc)
(*packet_size) = 0;
return rc;
} | parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
unsigned char *data, struct list_head *auth_tok_list,
struct ecryptfs_auth_tok **new_auth_tok,
size_t *packet_size, size_t max_packet_size)
{
size_t body_size;
struct ecryptfs_auth_tok_list_item *auth_tok_list_item;
size_t length_size;
int rc = 0;
(*packet_size) = 0;
(*new_auth_tok) = NULL;
if (max_packet_size < (ECRYPTFS_SALT_SIZE + 7)) {
printk(KERN_ERR "Max packet size too large\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != ECRYPTFS_TAG_3_PACKET_TYPE) {
printk(KERN_ERR "First byte != 0x%.2x; invalid packet\n",
ECRYPTFS_TAG_3_PACKET_TYPE);
rc = -EINVAL;
goto out;
}
auth_tok_list_item =
kmem_cache_zalloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
if (!auth_tok_list_item) {
printk(KERN_ERR "Unable to allocate memory\n");
rc = -ENOMEM;
goto out;
}
(*new_auth_tok) = &auth_tok_list_item->auth_tok;
rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
&length_size);
if (rc) {
printk(KERN_WARNING "Error parsing packet length; rc = [%d]\n",
rc);
goto out_free;
}
if (unlikely(body_size < (ECRYPTFS_SALT_SIZE + 5))) {
printk(KERN_WARNING "Invalid body size ([%td])\n", body_size);
rc = -EINVAL;
goto out_free;
}
(*packet_size) += length_size;
if (unlikely((*packet_size) + body_size > max_packet_size)) {
printk(KERN_ERR "Packet size exceeds max\n");
rc = -EINVAL;
goto out_free;
}
(*new_auth_tok)->session_key.encrypted_key_size =
(body_size - (ECRYPTFS_SALT_SIZE + 5));
if (unlikely(data[(*packet_size)++] != 0x04)) {
printk(KERN_WARNING "Unknown version number [%d]\n",
data[(*packet_size) - 1]);
rc = -EINVAL;
goto out_free;
}
ecryptfs_cipher_code_to_string(crypt_stat->cipher,
(u16)data[(*packet_size)]);
switch(data[(*packet_size)++]) {
case RFC2440_CIPHER_AES_192:
crypt_stat->key_size = 24;
break;
default:
crypt_stat->key_size =
(*new_auth_tok)->session_key.encrypted_key_size;
}
ecryptfs_init_crypt_ctx(crypt_stat);
if (unlikely(data[(*packet_size)++] != 0x03)) {
printk(KERN_WARNING "Only S2K ID 3 is currently supported\n");
rc = -ENOSYS;
goto out_free;
}
switch (data[(*packet_size)++]) {
case 0x01:
memcpy((*new_auth_tok)->token.password.salt,
&data[(*packet_size)], ECRYPTFS_SALT_SIZE);
(*packet_size) += ECRYPTFS_SALT_SIZE;
(*new_auth_tok)->token.password.hash_iterations =
((u32) 16 + (data[(*packet_size)] & 15))
<< ((data[(*packet_size)] >> 4) + 6);
(*packet_size)++;
memcpy((*new_auth_tok)->session_key.encrypted_key,
&data[(*packet_size)],
(*new_auth_tok)->session_key.encrypted_key_size);
(*packet_size) +=
(*new_auth_tok)->session_key.encrypted_key_size;
(*new_auth_tok)->session_key.flags &=
~ECRYPTFS_CONTAINS_DECRYPTED_KEY;
(*new_auth_tok)->session_key.flags |=
ECRYPTFS_CONTAINS_ENCRYPTED_KEY;
(*new_auth_tok)->token.password.hash_algo = 0x01;
break;
default:
ecryptfs_printk(KERN_ERR, "Unsupported hash algorithm: "
"[%d]\n", data[(*packet_size) - 1]);
rc = -ENOSYS;
goto out_free;
}
(*new_auth_tok)->token_type = ECRYPTFS_PASSWORD;
(*new_auth_tok)->session_key.flags &=
~(ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT);
(*new_auth_tok)->session_key.flags &=
~(ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT);
list_add(&auth_tok_list_item->list, auth_tok_list);
goto out;
out_free:
(*new_auth_tok) = NULL;
memset(auth_tok_list_item, 0,
sizeof(struct ecryptfs_auth_tok_list_item));
kmem_cache_free(ecryptfs_auth_tok_list_item_cache,
auth_tok_list_item);
out:
if (rc)
(*packet_size) = 0;
return rc;
} | 1,398 |
1 | static void iadst4 ( const int16_t * input , int16_t * output ) {
int s0 , s1 , s2 , s3 , s4 , s5 , s6 , s7 ;
int x0 = input [ 0 ] ;
int x1 = input [ 1 ] ;
int x2 = input [ 2 ] ;
int x3 = input [ 3 ] ;
if ( ! ( x0 | x1 | x2 | x3 ) ) {
output [ 0 ] = output [ 1 ] = output [ 2 ] = output [ 3 ] = 0 ;
return ;
}
s0 = sinpi_1_9 * x0 ;
s1 = sinpi_2_9 * x0 ;
s2 = sinpi_3_9 * x1 ;
s3 = sinpi_4_9 * x2 ;
s4 = sinpi_1_9 * x2 ;
s5 = sinpi_2_9 * x3 ;
s6 = sinpi_4_9 * x3 ;
s7 = x0 - x2 + x3 ;
x0 = s0 + s3 + s5 ;
x1 = s1 - s4 - s6 ;
x2 = sinpi_3_9 * s7 ;
x3 = s2 ;
s0 = x0 + x3 ;
s1 = x1 + x3 ;
s2 = x2 ;
s3 = x0 + x1 - x3 ;
output [ 0 ] = dct_const_round_shift ( s0 ) ;
output [ 1 ] = dct_const_round_shift ( s1 ) ;
output [ 2 ] = dct_const_round_shift ( s2 ) ;
output [ 3 ] = dct_const_round_shift ( s3 ) ;
} | static void iadst4 ( const int16_t * input , int16_t * output ) {
int s0 , s1 , s2 , s3 , s4 , s5 , s6 , s7 ;
int x0 = input [ 0 ] ;
int x1 = input [ 1 ] ;
int x2 = input [ 2 ] ;
int x3 = input [ 3 ] ;
if ( ! ( x0 | x1 | x2 | x3 ) ) {
output [ 0 ] = output [ 1 ] = output [ 2 ] = output [ 3 ] = 0 ;
return ;
}
s0 = sinpi_1_9 * x0 ;
s1 = sinpi_2_9 * x0 ;
s2 = sinpi_3_9 * x1 ;
s3 = sinpi_4_9 * x2 ;
s4 = sinpi_1_9 * x2 ;
s5 = sinpi_2_9 * x3 ;
s6 = sinpi_4_9 * x3 ;
s7 = x0 - x2 + x3 ;
x0 = s0 + s3 + s5 ;
x1 = s1 - s4 - s6 ;
x2 = sinpi_3_9 * s7 ;
x3 = s2 ;
s0 = x0 + x3 ;
s1 = x1 + x3 ;
s2 = x2 ;
s3 = x0 + x1 - x3 ;
output [ 0 ] = dct_const_round_shift ( s0 ) ;
output [ 1 ] = dct_const_round_shift ( s1 ) ;
output [ 2 ] = dct_const_round_shift ( s2 ) ;
output [ 3 ] = dct_const_round_shift ( s3 ) ;
} | 1,399 |
1 | char *qdist_pr(const struct qdist *dist, size_t n_bins, uint32_t opt) { const char *border = opt & QDIST_PR_BORDER ? "|" : ""; char *llabel, *rlabel; char *hgram; GString *s; if (dist->n == 0) { return NULL; } s = g_string_new(""); llabel = qdist_pr_label(dist, n_bins, opt, true); rlabel = qdist_pr_label(dist, n_bins, opt, false); hgram = qdist_pr_plain(dist, n_bins); g_string_append_printf(s, "%s%s%s%s%s", llabel, border, hgram, border, rlabel); g_free(llabel); g_free(rlabel); g_free(hgram); return g_string_free(s, FALSE); } | char *qdist_pr(const struct qdist *dist, size_t n_bins, uint32_t opt) { const char *border = opt & QDIST_PR_BORDER ? "|" : ""; char *llabel, *rlabel; char *hgram; GString *s; if (dist->n == 0) { return NULL; } s = g_string_new(""); llabel = qdist_pr_label(dist, n_bins, opt, true); rlabel = qdist_pr_label(dist, n_bins, opt, false); hgram = qdist_pr_plain(dist, n_bins); g_string_append_printf(s, "%s%s%s%s%s", llabel, border, hgram, border, rlabel); g_free(llabel); g_free(rlabel); g_free(hgram); return g_string_free(s, FALSE); } | 1,400 |
1 | video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
v4l2_kioctl func)
{
char sbuf[128];
void *mbuf = NULL;
void *parg = NULL;
long err = -EINVAL;
int is_ext_ctrl;
size_t ctrls_size = 0;
void __user *user_ptr = NULL;
is_ext_ctrl = (cmd == VIDIOC_S_EXT_CTRLS || cmd == VIDIOC_G_EXT_CTRLS ||
cmd == VIDIOC_TRY_EXT_CTRLS);
/* Copy arguments into temp kernel buffer */
switch (_IOC_DIR(cmd)) {
case _IOC_NONE:
parg = NULL;
break;
case _IOC_READ:
case _IOC_WRITE:
case (_IOC_WRITE | _IOC_READ):
if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
parg = sbuf;
} else {
/* too big to allocate from stack */
mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
}
err = -EFAULT;
if (_IOC_DIR(cmd) & _IOC_WRITE)
if (copy_from_user(parg, (void __user *)arg, _IOC_SIZE(cmd)))
goto out;
break;
}
if (is_ext_ctrl) {
struct v4l2_ext_controls *p = parg;
/* In case of an error, tell the caller that it wasn't
a specific control that caused it. */
p->error_idx = p->count;
user_ptr = (void __user *)p->controls;
if (p->count) {
ctrls_size = sizeof(struct v4l2_ext_control) * p->count;
/* Note: v4l2_ext_controls fits in sbuf[] so mbuf is still NULL. */
mbuf = kmalloc(ctrls_size, GFP_KERNEL);
err = -ENOMEM;
if (NULL == mbuf)
goto out_ext_ctrl;
err = -EFAULT;
if (copy_from_user(mbuf, user_ptr, ctrls_size))
goto out_ext_ctrl;
p->controls = mbuf;
}
}
/* call driver */
err = func(file, cmd, parg);
if (err == -ENOIOCTLCMD)
err = -EINVAL;
if (is_ext_ctrl) {
struct v4l2_ext_controls *p = parg;
p->controls = (void *)user_ptr;
if (p->count && err == 0 && copy_to_user(user_ptr, mbuf, ctrls_size))
err = -EFAULT;
goto out_ext_ctrl;
}
if (err < 0)
goto out;
out_ext_ctrl:
/* Copy results into user buffer */
switch (_IOC_DIR(cmd)) {
case _IOC_READ:
case (_IOC_WRITE | _IOC_READ):
if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
err = -EFAULT;
break;
}
out:
kfree(mbuf);
return err;
} | video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
v4l2_kioctl func)
{
char sbuf[128];
void *mbuf = NULL;
void *parg = NULL;
long err = -EINVAL;
int is_ext_ctrl;
size_t ctrls_size = 0;
void __user *user_ptr = NULL;
is_ext_ctrl = (cmd == VIDIOC_S_EXT_CTRLS || cmd == VIDIOC_G_EXT_CTRLS ||
cmd == VIDIOC_TRY_EXT_CTRLS);
switch (_IOC_DIR(cmd)) {
case _IOC_NONE:
parg = NULL;
break;
case _IOC_READ:
case _IOC_WRITE:
case (_IOC_WRITE | _IOC_READ):
if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
parg = sbuf;
} else {
mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
}
err = -EFAULT;
if (_IOC_DIR(cmd) & _IOC_WRITE)
if (copy_from_user(parg, (void __user *)arg, _IOC_SIZE(cmd)))
goto out;
break;
}
if (is_ext_ctrl) {
struct v4l2_ext_controls *p = parg;
p->error_idx = p->count;
user_ptr = (void __user *)p->controls;
if (p->count) {
ctrls_size = sizeof(struct v4l2_ext_control) * p->count;
mbuf = kmalloc(ctrls_size, GFP_KERNEL);
err = -ENOMEM;
if (NULL == mbuf)
goto out_ext_ctrl;
err = -EFAULT;
if (copy_from_user(mbuf, user_ptr, ctrls_size))
goto out_ext_ctrl;
p->controls = mbuf;
}
}
err = func(file, cmd, parg);
if (err == -ENOIOCTLCMD)
err = -EINVAL;
if (is_ext_ctrl) {
struct v4l2_ext_controls *p = parg;
p->controls = (void *)user_ptr;
if (p->count && err == 0 && copy_to_user(user_ptr, mbuf, ctrls_size))
err = -EFAULT;
goto out_ext_ctrl;
}
if (err < 0)
goto out;
out_ext_ctrl:
switch (_IOC_DIR(cmd)) {
case _IOC_READ:
case (_IOC_WRITE | _IOC_READ):
if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
err = -EFAULT;
break;
}
out:
kfree(mbuf);
return err;
} | 1,402 |
1 | int huft_build(b, n, s, d, e, t, m)
unsigned *b; /* code lengths in bits (all assumed <= BMAX) */
unsigned n; /* number of codes (assumed <= N_MAX) */
unsigned s; /* number of simple-valued codes (0..s-1) */
ush *d; /* list of base values for non-simple codes */
ush *e; /* list of extra bits for non-simple codes */
struct huft **t; /* result: starting table */
int *m; /* maximum lookup bits, returns actual */
/* Given a list of code lengths and a maximum table size, make a set of
tables to decode that set of codes. Return zero on success, one if
the given code set is incomplete (the tables are still built in this
case), two if the input is invalid (all zero length codes or an
oversubscribed set of lengths), and three if not enough memory. */
{
unsigned a; /* counter for codes of length k */
unsigned c[BMAX+1]; /* bit length count table */
unsigned f; /* i repeats in table every f entries */
int g; /* maximum code length */
int h; /* table level */
register unsigned i; /* counter, current code */
register unsigned j; /* counter */
register int k; /* number of bits in current code */
int l; /* bits per table (returned in m) */
register unsigned *p; /* pointer into c[], b[], or v[] */
register struct huft *q; /* points to current table */
struct huft r; /* table entry for structure assignment */
struct huft *u[BMAX]; /* table stack */
unsigned v[N_MAX]; /* values in order of bit length */
register int w; /* bits before this table == (l * h) */
unsigned x[BMAX+1]; /* bit offsets, then code stack */
unsigned *xp; /* pointer into x */
int y; /* number of dummy codes added */
unsigned z; /* number of entries in current table */
/* Generate counts for each bit length */
memzero(c, sizeof(c));
p = b; i = n;
do {
Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"),
n-i, *p));
c[*p]++; /* assume all entries <= BMAX */
p++; /* Can't combine with above line (Solaris bug) */
} while (--i);
if (c[0] == n) /* null input--all zero length codes */
{
q = (struct huft *) malloc (2 * sizeof *q);
if (!q)
return 3;
hufts += 2;
q[0].v.t = (struct huft *) NULL;
q[1].e = 99; /* invalid code marker */
q[1].b = 1;
*t = q + 1;
*m = 1;
return 0;
}
/* Find minimum and maximum length, bound *m by those */
l = *m;
for (j = 1; j <= BMAX; j++)
if (c[j])
break;
k = j; /* minimum code length */
if ((unsigned)l < j)
l = j;
for (i = BMAX; i; i--)
if (c[i])
break;
g = i; /* maximum code length */
if ((unsigned)l > i)
l = i;
*m = l;
/* Adjust last length count to fill out codes, if needed */
for (y = 1 << j; j < i; j++, y <<= 1)
if ((y -= c[j]) < 0)
return 2; /* bad input: more codes than bits */
if ((y -= c[i]) < 0)
return 2;
c[i] += y;
/* Generate starting offsets into the value table for each length */
x[1] = j = 0;
p = c + 1; xp = x + 2;
while (--i) { /* note that i == g from above */
*xp++ = (j += *p++);
}
/* Make a table of values in order of bit lengths */
p = b; i = 0;
do {
if ((j = *p++) != 0)
v[x[j]++] = i;
} while (++i < n);
n = x[g]; /* set n to length of v */
/* Generate the Huffman codes and for each, make the table entries */
x[0] = i = 0; /* first Huffman code is zero */
p = v; /* grab values in bit order */
h = -1; /* no tables yet--level -1 */
w = -l; /* bits decoded == (l * h) */
u[0] = (struct huft *)NULL; /* just to keep compilers happy */
q = (struct huft *)NULL; /* ditto */
z = 0; /* ditto */
/* go through the bit lengths (k already is bits in shortest code) */
for (; k <= g; k++)
{
a = c[k];
while (a--)
{
/* here i is the Huffman code of length k bits for value *p */
/* make tables up to required level */
while (k > w + l)
{
h++;
w += l; /* previous table always l bits */
/* compute minimum size table less than or equal to l bits */
z = (z = g - w) > (unsigned)l ? l : z; /* upper limit on table size */
if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
{ /* too few codes for k-w bit table */
f -= a + 1; /* deduct codes from patterns left */
xp = c + k;
if (j < z)
while (++j < z) /* try smaller tables up to z bits */
{
if ((f <<= 1) <= *++xp)
break; /* enough codes to use up j bits */
f -= *xp; /* else deduct codes from patterns */
}
}
z = 1 << j; /* table entries for j-bit table */
/* allocate and link in new table */
if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) ==
(struct huft *)NULL)
{
if (h)
huft_free(u[0]);
return 3; /* not enough memory */
}
hufts += z + 1; /* track memory usage */
*t = q + 1; /* link to list for huft_free() */
*(t = &(q->v.t)) = (struct huft *)NULL;
u[h] = ++q; /* table starts after link */
/* connect to last table, if there is one */
if (h)
{
x[h] = i; /* save pattern for backing up */
r.b = (uch)l; /* bits to dump before this table */
r.e = (uch)(16 + j); /* bits in this table */
r.v.t = q; /* pointer to this table */
j = i >> (w - l); /* (get around Turbo C bug) */
u[h-1][j] = r; /* connect to last table */
}
}
/* set up table entry in r */
r.b = (uch)(k - w);
if (p >= v + n)
r.e = 99; /* out of values--invalid code */
else if (*p < s)
{
r.e = (uch)(*p < 256 ? 16 : 15); /* 256 is end-of-block code */
r.v.n = (ush)(*p); /* simple code is just the value */
p++; /* one compiler does not like *p++ */
}
else
{
r.e = (uch)e[*p - s]; /* non-simple--look up in lists */
r.v.n = d[*p++ - s];
}
/* fill code-like entries with r */
f = 1 << (k - w);
for (j = i >> w; j < z; j += f)
q[j] = r;
/* backwards increment the k-bit code i */
for (j = 1 << (k - 1); i & j; j >>= 1)
i ^= j;
i ^= j;
/* backup over finished tables */
while ((i & ((1 << w) - 1)) != x[h])
{
h--; /* don't need to update q */
w -= l;
}
}
}
/* Return true (1) if we were given an incomplete table */
return y != 0 && g != 1;
} | int huft_build(b, n, s, d, e, t, m)
unsigned *b;
unsigned n;
unsigned s;
ush *d;
ush *e;
struct huft **t;
int *m;
{
unsigned a;
unsigned c[BMAX+1];
unsigned f;
int g;
int h;
register unsigned i;
register unsigned j;
register int k;
int l;
register unsigned *p;
register struct huft *q;
struct huft r;
struct huft *u[BMAX];
unsigned v[N_MAX];
register int w;
unsigned x[BMAX+1];
unsigned *xp;
int y;
unsigned z;
memzero(c, sizeof(c));
p = b; i = n;
do {
Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"),
n-i, *p));
c[*p]++;
p++;
} while (--i);
if (c[0] == n)
{
q = (struct huft *) malloc (2 * sizeof *q);
if (!q)
return 3;
hufts += 2;
q[0].v.t = (struct huft *) NULL;
q[1].e = 99;
q[1].b = 1;
*t = q + 1;
*m = 1;
return 0;
}
l = *m;
for (j = 1; j <= BMAX; j++)
if (c[j])
break;
k = j;
if ((unsigned)l < j)
l = j;
for (i = BMAX; i; i--)
if (c[i])
break;
g = i;
if ((unsigned)l > i)
l = i;
*m = l;
for (y = 1 << j; j < i; j++, y <<= 1)
if ((y -= c[j]) < 0)
return 2;
if ((y -= c[i]) < 0)
return 2;
c[i] += y;
x[1] = j = 0;
p = c + 1; xp = x + 2;
while (--i) {
*xp++ = (j += *p++);
}
p = b; i = 0;
do {
if ((j = *p++) != 0)
v[x[j]++] = i;
} while (++i < n);
n = x[g];
x[0] = i = 0;
p = v;
h = -1;
w = -l;
u[0] = (struct huft *)NULL;
q = (struct huft *)NULL;
z = 0;
for (; k <= g; k++)
{
a = c[k];
while (a--)
{
while (k > w + l)
{
h++;
w += l;
z = (z = g - w) > (unsigned)l ? l : z;
if ((f = 1 << (j = k - w)) > a + 1)
{
f -= a + 1;
xp = c + k;
if (j < z)
while (++j < z)
{
if ((f <<= 1) <= *++xp)
break;
f -= *xp;
}
}
z = 1 << j;
if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) ==
(struct huft *)NULL)
{
if (h)
huft_free(u[0]);
return 3;
}
hufts += z + 1;
*t = q + 1;
*(t = &(q->v.t)) = (struct huft *)NULL;
u[h] = ++q;
if (h)
{
x[h] = i;
r.b = (uch)l;
r.e = (uch)(16 + j);
r.v.t = q;
j = i >> (w - l);
u[h-1][j] = r;
}
}
r.b = (uch)(k - w);
if (p >= v + n)
r.e = 99;
else if (*p < s)
{
r.e = (uch)(*p < 256 ? 16 : 15);
r.v.n = (ush)(*p);
p++;
}
else
{
r.e = (uch)e[*p - s];
r.v.n = d[*p++ - s];
}
f = 1 << (k - w);
for (j = i >> w; j < z; j += f)
q[j] = r;
for (j = 1 << (k - 1); i & j; j >>= 1)
i ^= j;
i ^= j;
while ((i & ((1 << w) - 1)) != x[h])
{
h--;
w -= l;
}
}
}
return y != 0 && g != 1;
} | 1,403 |
0 | static bool Curl_isunreserved ( unsigned char in ) {
switch ( in ) {
case '0' : case '1' : case '2' : case '3' : case '4' : case '5' : case '6' : case '7' : case '8' : case '9' : case 'a' : case 'b' : case 'c' : case 'd' : case 'e' : case 'f' : case 'g' : case 'h' : case 'i' : case 'j' : case 'k' : case 'l' : case 'm' : case 'n' : case 'o' : case 'p' : case 'q' : case 'r' : case 's' : case 't' : case 'u' : case 'v' : case 'w' : case 'x' : case 'y' : case 'z' : case 'A' : case 'B' : case 'C' : case 'D' : case 'E' : case 'F' : case 'G' : case 'H' : case 'I' : case 'J' : case 'K' : case 'L' : case 'M' : case 'N' : case 'O' : case 'P' : case 'Q' : case 'R' : case 'S' : case 'T' : case 'U' : case 'V' : case 'W' : case 'X' : case 'Y' : case 'Z' : case '-' : case '.' : case '_' : case '~' : return TRUE ;
default : break ;
}
return FALSE ;
} | static bool Curl_isunreserved ( unsigned char in ) {
switch ( in ) {
case '0' : case '1' : case '2' : case '3' : case '4' : case '5' : case '6' : case '7' : case '8' : case '9' : case 'a' : case 'b' : case 'c' : case 'd' : case 'e' : case 'f' : case 'g' : case 'h' : case 'i' : case 'j' : case 'k' : case 'l' : case 'm' : case 'n' : case 'o' : case 'p' : case 'q' : case 'r' : case 's' : case 't' : case 'u' : case 'v' : case 'w' : case 'x' : case 'y' : case 'z' : case 'A' : case 'B' : case 'C' : case 'D' : case 'E' : case 'F' : case 'G' : case 'H' : case 'I' : case 'J' : case 'K' : case 'L' : case 'M' : case 'N' : case 'O' : case 'P' : case 'Q' : case 'R' : case 'S' : case 'T' : case 'U' : case 'V' : case 'W' : case 'X' : case 'Y' : case 'Z' : case '-' : case '.' : case '_' : case '~' : return TRUE ;
default : break ;
}
return FALSE ;
} | 1,404 |
0 | video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
v4l2_kioctl func)
{
char sbuf[128];
void *mbuf = NULL;
void *parg = (void *)arg;
long err = -EINVAL;
bool has_array_args;
size_t array_size = 0;
void __user *user_ptr = NULL;
void **kernel_ptr = NULL;
/* Copy arguments into temp kernel buffer */
if (_IOC_DIR(cmd) != _IOC_NONE) {
if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
parg = sbuf;
} else {
/* too big to allocate from stack */
mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
}
err = -EFAULT;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
unsigned long n = cmd_input_size(cmd);
if (copy_from_user(parg, (void __user *)arg, n))
goto out;
/* zero out anything we don't copy from userspace */
if (n < _IOC_SIZE(cmd))
memset((u8 *)parg + n, 0, _IOC_SIZE(cmd) - n);
} else {
/* read-only ioctl */
memset(parg, 0, _IOC_SIZE(cmd));
}
}
err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
if (err < 0)
goto out;
has_array_args = err;
if (has_array_args) {
/*
* When adding new types of array args, make sure that the
* parent argument to ioctl (which contains the pointer to the
* array) fits into sbuf (so that mbuf will still remain
* unused up to here).
*/
mbuf = kmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
if (NULL == mbuf)
goto out_array_args;
err = -EFAULT;
if (copy_from_user(mbuf, user_ptr, array_size))
goto out_array_args;
*kernel_ptr = mbuf;
}
/* Handles IOCTL */
err = func(file, cmd, parg);
if (err == -ENOIOCTLCMD)
err = -EINVAL;
if (has_array_args) {
*kernel_ptr = user_ptr;
if (copy_to_user(user_ptr, mbuf, array_size))
err = -EFAULT;
goto out_array_args;
}
if (err < 0)
goto out;
out_array_args:
/* Copy results into user buffer */
switch (_IOC_DIR(cmd)) {
case _IOC_READ:
case (_IOC_WRITE | _IOC_READ):
if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
err = -EFAULT;
break;
}
out:
kfree(mbuf);
return err;
} | video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
v4l2_kioctl func)
{
char sbuf[128];
void *mbuf = NULL;
void *parg = (void *)arg;
long err = -EINVAL;
bool has_array_args;
size_t array_size = 0;
void __user *user_ptr = NULL;
void **kernel_ptr = NULL;
if (_IOC_DIR(cmd) != _IOC_NONE) {
if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
parg = sbuf;
} else {
mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
}
err = -EFAULT;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
unsigned long n = cmd_input_size(cmd);
if (copy_from_user(parg, (void __user *)arg, n))
goto out;
if (n < _IOC_SIZE(cmd))
memset((u8 *)parg + n, 0, _IOC_SIZE(cmd) - n);
} else {
memset(parg, 0, _IOC_SIZE(cmd));
}
}
err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
if (err < 0)
goto out;
has_array_args = err;
if (has_array_args) {
mbuf = kmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
if (NULL == mbuf)
goto out_array_args;
err = -EFAULT;
if (copy_from_user(mbuf, user_ptr, array_size))
goto out_array_args;
*kernel_ptr = mbuf;
}
err = func(file, cmd, parg);
if (err == -ENOIOCTLCMD)
err = -EINVAL;
if (has_array_args) {
*kernel_ptr = user_ptr;
if (copy_to_user(user_ptr, mbuf, array_size))
err = -EFAULT;
goto out_array_args;
}
if (err < 0)
goto out;
out_array_args:
switch (_IOC_DIR(cmd)) {
case _IOC_READ:
case (_IOC_WRITE | _IOC_READ):
if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
err = -EFAULT;
break;
}
out:
kfree(mbuf);
return err;
} | 1,405 |
1 | static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma, int current_index, uint64_t current_addr, uint64_t length) { struct ibv_sge sge; struct ibv_send_wr send_wr = { 0 }; struct ibv_send_wr *bad_wr; int reg_result_idx, ret, count = 0; uint64_t chunk, chunks; uint8_t *chunk_start, *chunk_end; RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]); RDMARegister reg; RDMARegisterResult *reg_result; RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT }; RDMAControlHeader head = { .len = sizeof(RDMARegister), .type = RDMA_CONTROL_REGISTER_REQUEST, .repeat = 1, }; retry: sge.addr = (uint64_t)(block->local_host_addr + (current_addr - block->offset)); sge.length = length; chunk = ram_chunk_index(block->local_host_addr, (uint8_t *) sge.addr); chunk_start = ram_chunk_start(block, chunk); if (block->is_ram_block) { chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT); if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { chunks--; } } else { chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT); if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { chunks--; } } DDPRINTF("Writing %" PRIu64 " chunks, (%" PRIu64 " MB)\n", chunks + 1, (chunks + 1) * (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024); chunk_end = ram_chunk_end(block, chunk + chunks); if (!rdma->pin_all) { #ifdef RDMA_UNREGISTRATION_EXAMPLE qemu_rdma_unregister_waiting(rdma); #endif } while (test_bit(chunk, block->transit_bitmap)) { (void)count; DDPRINTF("(%d) Not clobbering: block: %d chunk %" PRIu64 " current %" PRIu64 " len %" PRIu64 " %d %d\n", count++, current_index, chunk, sge.addr, length, rdma->nb_sent, block->nb_chunks); ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL); if (ret < 0) { fprintf(stderr, "Failed to Wait for previous write to complete " "block %d chunk %" PRIu64 " current %" PRIu64 " len %" PRIu64 " %d\n", current_index, chunk, sge.addr, length, rdma->nb_sent); return ret; } } if (!rdma->pin_all || !block->is_ram_block) { if (!block->remote_keys[chunk]) { /* * This chunk has not yet been registered, so first check to see * if the entire chunk is zero. If so, tell the other size to * memset() + madvise() the entire chunk without RDMA. */ if (can_use_buffer_find_nonzero_offset((void *)sge.addr, length) && buffer_find_nonzero_offset((void *)sge.addr, length) == length) { RDMACompress comp = { .offset = current_addr, .value = 0, .block_idx = current_index, .length = length, }; head.len = sizeof(comp); head.type = RDMA_CONTROL_COMPRESS; DDPRINTF("Entire chunk is zero, sending compress: %" PRIu64 " for %d " "bytes, index: %d, offset: %" PRId64 "...\n", chunk, sge.length, current_index, current_addr); compress_to_network(&comp); ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &comp, NULL, NULL, NULL); if (ret < 0) { return -EIO; } acct_update_position(f, sge.length, true); return 1; } /* * Otherwise, tell other side to register. */ reg.current_index = current_index; if (block->is_ram_block) { reg.key.current_addr = current_addr; } else { reg.key.chunk = chunk; } reg.chunks = chunks; DDPRINTF("Sending registration request chunk %" PRIu64 " for %d " "bytes, index: %d, offset: %" PRId64 "...\n", chunk, sge.length, current_index, current_addr); register_to_network(®); ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) ®, &resp, ®_result_idx, NULL); if (ret < 0) { return ret; } /* try to overlap this single registration with the one we sent. 
*/ if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *) sge.addr, &sge.lkey, NULL, chunk, chunk_start, chunk_end)) { fprintf(stderr, "cannot get lkey!\n"); return -EINVAL; } reg_result = (RDMARegisterResult *) rdma->wr_data[reg_result_idx].control_curr; network_to_result(reg_result); DDPRINTF("Received registration result:" " my key: %x their key %x, chunk %" PRIu64 "\n", block->remote_keys[chunk], reg_result->rkey, chunk); block->remote_keys[chunk] = reg_result->rkey; block->remote_host_addr = reg_result->host_addr; } else { /* already registered before */ if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *)sge.addr, &sge.lkey, NULL, chunk, chunk_start, chunk_end)) { fprintf(stderr, "cannot get lkey!\n"); return -EINVAL; } } send_wr.wr.rdma.rkey = block->remote_keys[chunk]; } else { send_wr.wr.rdma.rkey = block->remote_rkey; if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *)sge.addr, &sge.lkey, NULL, chunk, chunk_start, chunk_end)) { fprintf(stderr, "cannot get lkey!\n"); return -EINVAL; } } /* * Encode the ram block index and chunk within this wrid. * We will use this information at the time of completion * to figure out which bitmap to check against and then which * chunk in the bitmap to look for. */ send_wr.wr_id = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE, current_index, chunk); send_wr.opcode = IBV_WR_RDMA_WRITE; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.sg_list = &sge; send_wr.num_sge = 1; send_wr.wr.rdma.remote_addr = block->remote_host_addr + (current_addr - block->offset); DDDPRINTF("Posting chunk: %" PRIu64 ", addr: %lx" " remote: %lx, bytes %" PRIu32 "\n", chunk, sge.addr, send_wr.wr.rdma.remote_addr, sge.length); /* * ibv_post_send() does not return negative error numbers, * per the specification they are positive - no idea why. */ ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr); if (ret == ENOMEM) { DDPRINTF("send queue is full. wait a little....\n"); ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL); if (ret < 0) { fprintf(stderr, "rdma migration: failed to make " "room in full send queue! 
%d\n", ret); return ret; } goto retry; } else if (ret > 0) { perror("rdma migration: post rdma write failed"); return -ret; } set_bit(chunk, block->transit_bitmap); acct_update_position(f, sge.length, false); rdma->total_writes++; return 0; } | static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma, int current_index, uint64_t current_addr, uint64_t length) { struct ibv_sge sge; struct ibv_send_wr send_wr = { 0 }; struct ibv_send_wr *bad_wr; int reg_result_idx, ret, count = 0; uint64_t chunk, chunks; uint8_t *chunk_start, *chunk_end; RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]); RDMARegister reg; RDMARegisterResult *reg_result; RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT }; RDMAControlHeader head = { .len = sizeof(RDMARegister), .type = RDMA_CONTROL_REGISTER_REQUEST, .repeat = 1, }; retry: sge.addr = (uint64_t)(block->local_host_addr + (current_addr - block->offset)); sge.length = length; chunk = ram_chunk_index(block->local_host_addr, (uint8_t *) sge.addr); chunk_start = ram_chunk_start(block, chunk); if (block->is_ram_block) { chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT); if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { chunks--; } } else { chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT); if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) { chunks--; } } DDPRINTF("Writing %" PRIu64 " chunks, (%" PRIu64 " MB)\n", chunks + 1, (chunks + 1) * (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024); chunk_end = ram_chunk_end(block, chunk + chunks); if (!rdma->pin_all) { #ifdef RDMA_UNREGISTRATION_EXAMPLE qemu_rdma_unregister_waiting(rdma); #endif } while (test_bit(chunk, block->transit_bitmap)) { (void)count; DDPRINTF("(%d) Not clobbering: block: %d chunk %" PRIu64 " current %" PRIu64 " len %" PRIu64 " %d %d\n", count++, current_index, chunk, sge.addr, length, rdma->nb_sent, block->nb_chunks); ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL); if (ret < 0) { fprintf(stderr, "Failed to Wait for previous write to complete " "block %d chunk %" PRIu64 " current %" PRIu64 " len %" PRIu64 " %d\n", current_index, chunk, sge.addr, length, rdma->nb_sent); return ret; } } if (!rdma->pin_all || !block->is_ram_block) { if (!block->remote_keys[chunk]) { if (can_use_buffer_find_nonzero_offset((void *)sge.addr, length) && buffer_find_nonzero_offset((void *)sge.addr, length) == length) { RDMACompress comp = { .offset = current_addr, .value = 0, .block_idx = current_index, .length = length, }; head.len = sizeof(comp); head.type = RDMA_CONTROL_COMPRESS; DDPRINTF("Entire chunk is zero, sending compress: %" PRIu64 " for %d " "bytes, index: %d, offset: %" PRId64 "...\n", chunk, sge.length, current_index, current_addr); compress_to_network(&comp); ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &comp, NULL, NULL, NULL); if (ret < 0) { return -EIO; } acct_update_position(f, sge.length, true); return 1; } reg.current_index = current_index; if (block->is_ram_block) { reg.key.current_addr = current_addr; } else { reg.key.chunk = chunk; } reg.chunks = chunks; DDPRINTF("Sending registration request chunk %" PRIu64 " for %d " "bytes, index: %d, offset: %" PRId64 "...\n", chunk, sge.length, current_index, current_addr); register_to_network(®); ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) ®, &resp, ®_result_idx, NULL); if (ret < 0) { return ret; } if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *) sge.addr, &sge.lkey, NULL, chunk, chunk_start, chunk_end)) { fprintf(stderr, "cannot get lkey!\n"); 
return -EINVAL; } reg_result = (RDMARegisterResult *) rdma->wr_data[reg_result_idx].control_curr; network_to_result(reg_result); DDPRINTF("Received registration result:" " my key: %x their key %x, chunk %" PRIu64 "\n", block->remote_keys[chunk], reg_result->rkey, chunk); block->remote_keys[chunk] = reg_result->rkey; block->remote_host_addr = reg_result->host_addr; } else { if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *)sge.addr, &sge.lkey, NULL, chunk, chunk_start, chunk_end)) { fprintf(stderr, "cannot get lkey!\n"); return -EINVAL; } } send_wr.wr.rdma.rkey = block->remote_keys[chunk]; } else { send_wr.wr.rdma.rkey = block->remote_rkey; if (qemu_rdma_register_and_get_keys(rdma, block, (uint8_t *)sge.addr, &sge.lkey, NULL, chunk, chunk_start, chunk_end)) { fprintf(stderr, "cannot get lkey!\n"); return -EINVAL; } } send_wr.wr_id = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE, current_index, chunk); send_wr.opcode = IBV_WR_RDMA_WRITE; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.sg_list = &sge; send_wr.num_sge = 1; send_wr.wr.rdma.remote_addr = block->remote_host_addr + (current_addr - block->offset); DDDPRINTF("Posting chunk: %" PRIu64 ", addr: %lx" " remote: %lx, bytes %" PRIu32 "\n", chunk, sge.addr, send_wr.wr.rdma.remote_addr, sge.length); ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr); if (ret == ENOMEM) { DDPRINTF("send queue is full. wait a little....\n"); ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL); if (ret < 0) { fprintf(stderr, "rdma migration: failed to make " "room in full send queue! %d\n", ret); return ret; } goto retry; } else if (ret > 0) { perror("rdma migration: post rdma write failed"); return -ret; } set_bit(chunk, block->transit_bitmap); acct_update_position(f, sge.length, false); rdma->total_writes++; return 0; } | 1,406 |
1 | static bool php_mb_parse_encoding(const Variant& encoding,
mbfl_encoding ***return_list,
int *return_size, bool persistent) {
bool ret;
if (encoding.isArray()) {
ret = php_mb_parse_encoding_array(encoding.toArray(),
return_list, return_size,
persistent ? 1 : 0);
} else {
String enc = encoding.toString();
ret = php_mb_parse_encoding_list(enc.data(), enc.size(),
return_list, return_size,
persistent ? 1 : 0);
}
if (!ret) {
if (return_list && *return_list) {
free(*return_list);
*return_list = nullptr;
}
return_size = 0;
}
return ret;
} | static bool php_mb_parse_encoding(const Variant& encoding,
mbfl_encoding ***return_list,
int *return_size, bool persistent) {
bool ret;
if (encoding.isArray()) {
ret = php_mb_parse_encoding_array(encoding.toArray(),
return_list, return_size,
persistent ? 1 : 0);
} else {
String enc = encoding.toString();
ret = php_mb_parse_encoding_list(enc.data(), enc.size(),
return_list, return_size,
persistent ? 1 : 0);
}
if (!ret) {
if (return_list && *return_list) {
free(*return_list);
*return_list = nullptr;
}
return_size = 0;
}
return ret;
} | 1,407 |
1 | static int init_directories(BDRVVVFATState* s, const char* dirname) { bootsector_t* bootsector; mapping_t* mapping; unsigned int i; unsigned int cluster; memset(&(s->first_sectors[0]),0,0x40*0x200); s->cluster_size=s->sectors_per_cluster*0x200; s->cluster_buffer=qemu_malloc(s->cluster_size); /* * The formula: sc = spf+1+spf*spc*(512*8/fat_type), * where sc is sector_count, * spf is sectors_per_fat, * spc is sectors_per_clusters, and * fat_type = 12, 16 or 32. */ i = 1+s->sectors_per_cluster*0x200*8/s->fat_type; s->sectors_per_fat=(s->sector_count+i)/i; /* round up */ array_init(&(s->mapping),sizeof(mapping_t)); array_init(&(s->directory),sizeof(direntry_t)); /* add volume label */ { direntry_t* entry=array_get_next(&(s->directory)); entry->attributes=0x28; /* archive | volume label */ snprintf((char*)entry->name,11,"QEMU VVFAT"); } /* Now build FAT, and write back information into directory */ init_fat(s); s->faked_sectors=s->first_sectors_number+s->sectors_per_fat*2; s->cluster_count=sector2cluster(s, s->sector_count); mapping = array_get_next(&(s->mapping)); mapping->begin = 0; mapping->dir_index = 0; mapping->info.dir.parent_mapping_index = -1; mapping->first_mapping_index = -1; mapping->path = strdup(dirname); i = strlen(mapping->path); if (i > 0 && mapping->path[i - 1] == '/') mapping->path[i - 1] = '\0'; mapping->mode = MODE_DIRECTORY; mapping->read_only = 0; s->path = mapping->path; for (i = 0, cluster = 0; i < s->mapping.next; i++) { /* MS-DOS expects the FAT to be 0 for the root directory * (except for the media byte). */ /* LATER TODO: still true for FAT32? */ int fix_fat = (i != 0); mapping = array_get(&(s->mapping), i); if (mapping->mode & MODE_DIRECTORY) { mapping->begin = cluster; if(read_directory(s, i)) { fprintf(stderr, "Could not read directory %s\n", mapping->path); return -1; } mapping = array_get(&(s->mapping), i); } else { assert(mapping->mode == MODE_UNDEFINED); mapping->mode=MODE_NORMAL; mapping->begin = cluster; if (mapping->end > 0) { direntry_t* direntry = array_get(&(s->directory), mapping->dir_index); mapping->end = cluster + 1 + (mapping->end-1)/s->cluster_size; set_begin_of_direntry(direntry, mapping->begin); } else { mapping->end = cluster + 1; fix_fat = 0; } } assert(mapping->begin < mapping->end); /* next free cluster */ cluster = mapping->end; if(cluster > s->cluster_count) { fprintf(stderr,"Directory does not fit in FAT%d (capacity %s)\n", s->fat_type, s->fat_type == 12 ? s->sector_count == 2880 ? 
"1.44 MB" : "2.88 MB" : "504MB"); return -EINVAL; } /* fix fat for entry */ if (fix_fat) { int j; for(j = mapping->begin; j < mapping->end - 1; j++) fat_set(s, j, j+1); fat_set(s, mapping->end - 1, s->max_fat_value); } } mapping = array_get(&(s->mapping), 0); s->sectors_of_root_directory = mapping->end * s->sectors_per_cluster; s->last_cluster_of_root_directory = mapping->end; /* the FAT signature */ fat_set(s,0,s->max_fat_value); fat_set(s,1,s->max_fat_value); s->current_mapping = NULL; bootsector=(bootsector_t*)(s->first_sectors+(s->first_sectors_number-1)*0x200); bootsector->jump[0]=0xeb; bootsector->jump[1]=0x3e; bootsector->jump[2]=0x90; memcpy(bootsector->name,"QEMU ",8); bootsector->sector_size=cpu_to_le16(0x200); bootsector->sectors_per_cluster=s->sectors_per_cluster; bootsector->reserved_sectors=cpu_to_le16(1); bootsector->number_of_fats=0x2; /* number of FATs */ bootsector->root_entries=cpu_to_le16(s->sectors_of_root_directory*0x10); bootsector->total_sectors16=s->sector_count>0xffff?0:cpu_to_le16(s->sector_count); bootsector->media_type=(s->fat_type!=12?0xf8:s->sector_count==5760?0xf9:0xf8); /* media descriptor */ s->fat.pointer[0] = bootsector->media_type; bootsector->sectors_per_fat=cpu_to_le16(s->sectors_per_fat); bootsector->sectors_per_track=cpu_to_le16(s->bs->secs); bootsector->number_of_heads=cpu_to_le16(s->bs->heads); bootsector->hidden_sectors=cpu_to_le32(s->first_sectors_number==1?0:0x3f); bootsector->total_sectors=cpu_to_le32(s->sector_count>0xffff?s->sector_count:0); /* LATER TODO: if FAT32, this is wrong */ bootsector->u.fat16.drive_number=s->fat_type==12?0:0x80; /* assume this is hda (TODO) */ bootsector->u.fat16.current_head=0; bootsector->u.fat16.signature=0x29; bootsector->u.fat16.id=cpu_to_le32(0xfabe1afd); memcpy(bootsector->u.fat16.volume_label,"QEMU VVFAT ",11); memcpy(bootsector->fat_type,(s->fat_type==12?"FAT12 ":s->fat_type==16?"FAT16 ":"FAT32 "),8); bootsector->magic[0]=0x55; bootsector->magic[1]=0xaa; return 0; } | static int init_directories(BDRVVVFATState* s, const char* dirname) { bootsector_t* bootsector; mapping_t* mapping; unsigned int i; unsigned int cluster; memset(&(s->first_sectors[0]),0,0x40*0x200); s->cluster_size=s->sectors_per_cluster*0x200; s->cluster_buffer=qemu_malloc(s->cluster_size); i = 1+s->sectors_per_cluster*0x200*8/s->fat_type; s->sectors_per_fat=(s->sector_count+i)/i; array_init(&(s->mapping),sizeof(mapping_t)); array_init(&(s->directory),sizeof(direntry_t)); { direntry_t* entry=array_get_next(&(s->directory)); entry->attributes=0x28; snprintf((char*)entry->name,11,"QEMU VVFAT"); } init_fat(s); s->faked_sectors=s->first_sectors_number+s->sectors_per_fat*2; s->cluster_count=sector2cluster(s, s->sector_count); mapping = array_get_next(&(s->mapping)); mapping->begin = 0; mapping->dir_index = 0; mapping->info.dir.parent_mapping_index = -1; mapping->first_mapping_index = -1; mapping->path = strdup(dirname); i = strlen(mapping->path); if (i > 0 && mapping->path[i - 1] == '/') mapping->path[i - 1] = '\0'; mapping->mode = MODE_DIRECTORY; mapping->read_only = 0; s->path = mapping->path; for (i = 0, cluster = 0; i < s->mapping.next; i++) { int fix_fat = (i != 0); mapping = array_get(&(s->mapping), i); if (mapping->mode & MODE_DIRECTORY) { mapping->begin = cluster; if(read_directory(s, i)) { fprintf(stderr, "Could not read directory %s\n", mapping->path); return -1; } mapping = array_get(&(s->mapping), i); } else { assert(mapping->mode == MODE_UNDEFINED); mapping->mode=MODE_NORMAL; mapping->begin = cluster; if (mapping->end > 0) { 
direntry_t* direntry = array_get(&(s->directory), mapping->dir_index); mapping->end = cluster + 1 + (mapping->end-1)/s->cluster_size; set_begin_of_direntry(direntry, mapping->begin); } else { mapping->end = cluster + 1; fix_fat = 0; } } assert(mapping->begin < mapping->end); cluster = mapping->end; if(cluster > s->cluster_count) { fprintf(stderr,"Directory does not fit in FAT%d (capacity %s)\n", s->fat_type, s->fat_type == 12 ? s->sector_count == 2880 ? "1.44 MB" : "2.88 MB" : "504MB"); return -EINVAL; } if (fix_fat) { int j; for(j = mapping->begin; j < mapping->end - 1; j++) fat_set(s, j, j+1); fat_set(s, mapping->end - 1, s->max_fat_value); } } mapping = array_get(&(s->mapping), 0); s->sectors_of_root_directory = mapping->end * s->sectors_per_cluster; s->last_cluster_of_root_directory = mapping->end; fat_set(s,0,s->max_fat_value); fat_set(s,1,s->max_fat_value); s->current_mapping = NULL; bootsector=(bootsector_t*)(s->first_sectors+(s->first_sectors_number-1)*0x200); bootsector->jump[0]=0xeb; bootsector->jump[1]=0x3e; bootsector->jump[2]=0x90; memcpy(bootsector->name,"QEMU ",8); bootsector->sector_size=cpu_to_le16(0x200); bootsector->sectors_per_cluster=s->sectors_per_cluster; bootsector->reserved_sectors=cpu_to_le16(1); bootsector->number_of_fats=0x2; bootsector->root_entries=cpu_to_le16(s->sectors_of_root_directory*0x10); bootsector->total_sectors16=s->sector_count>0xffff?0:cpu_to_le16(s->sector_count); bootsector->media_type=(s->fat_type!=12?0xf8:s->sector_count==5760?0xf9:0xf8); s->fat.pointer[0] = bootsector->media_type; bootsector->sectors_per_fat=cpu_to_le16(s->sectors_per_fat); bootsector->sectors_per_track=cpu_to_le16(s->bs->secs); bootsector->number_of_heads=cpu_to_le16(s->bs->heads); bootsector->hidden_sectors=cpu_to_le32(s->first_sectors_number==1?0:0x3f); bootsector->total_sectors=cpu_to_le32(s->sector_count>0xffff?s->sector_count:0); bootsector->u.fat16.drive_number=s->fat_type==12?0:0x80; bootsector->u.fat16.current_head=0; bootsector->u.fat16.signature=0x29; bootsector->u.fat16.id=cpu_to_le32(0xfabe1afd); memcpy(bootsector->u.fat16.volume_label,"QEMU VVFAT ",11); memcpy(bootsector->fat_type,(s->fat_type==12?"FAT12 ":s->fat_type==16?"FAT16 ":"FAT32 "),8); bootsector->magic[0]=0x55; bootsector->magic[1]=0xaa; return 0; } | 1,409 |
0 | static bool php_mb_parse_encoding(const Variant& encoding,
mbfl_encoding ***return_list,
int *return_size, bool persistent) {
bool ret;
if (encoding.isArray()) {
ret = php_mb_parse_encoding_array(encoding.toArray(),
return_list, return_size,
persistent ? 1 : 0);
} else {
String enc = encoding.toString();
ret = php_mb_parse_encoding_list(enc.data(), enc.size(),
return_list, return_size,
persistent ? 1 : 0);
}
if (!ret) {
if (return_list && *return_list) {
req::free(*return_list);
*return_list = nullptr;
}
return_size = 0;
}
return ret;
} | static bool php_mb_parse_encoding(const Variant& encoding,
mbfl_encoding ***return_list,
int *return_size, bool persistent) {
bool ret;
if (encoding.isArray()) {
ret = php_mb_parse_encoding_array(encoding.toArray(),
return_list, return_size,
persistent ? 1 : 0);
} else {
String enc = encoding.toString();
ret = php_mb_parse_encoding_list(enc.data(), enc.size(),
return_list, return_size,
persistent ? 1 : 0);
}
if (!ret) {
if (return_list && *return_list) {
req::free(*return_list);
*return_list = nullptr;
}
return_size = 0;
}
return ret;
} | 1,410 |
0 | static int x8_decode_intra_mb ( IntraX8Context * const w , const int chroma ) {
MpegEncContext * const s = w -> s ;
uint8_t * scantable ;
int final , run , level ;
int ac_mode , dc_mode , est_run , dc_level ;
int pos , n ;
int zeros_only ;
int use_quant_matrix ;
int sign ;
assert ( w -> orient < 12 ) ;
s -> dsp . clear_block ( s -> block [ 0 ] ) ;
if ( chroma ) {
dc_mode = 2 ;
}
else {
dc_mode = ! ! w -> est_run ;
}
if ( x8_get_dc_rlf ( w , dc_mode , & dc_level , & final ) ) return - 1 ;
n = 0 ;
zeros_only = 0 ;
if ( ! final ) {
use_quant_matrix = w -> use_quant_matrix ;
if ( chroma ) {
ac_mode = 1 ;
est_run = 64 ;
/* not used */ }
else {
if ( w -> raw_orient < 3 ) {
use_quant_matrix = 0 ;
}
if ( w -> raw_orient > 4 ) {
ac_mode = 0 ;
est_run = 64 ;
}
else {
if ( w -> est_run > 1 ) {
ac_mode = 2 ;
est_run = w -> est_run ;
}
else {
ac_mode = 3 ;
est_run = 64 ;
}
}
}
x8_select_ac_table ( w , ac_mode ) ;
scantable = w -> scantable [ ( 0x928548 >> ( 2 * w -> orient ) ) & 3 ] . permutated ;
pos = 0 ;
do {
n ++ ;
if ( n >= est_run ) {
ac_mode = 3 ;
x8_select_ac_table ( w , 3 ) ;
}
x8_get_ac_rlf ( w , ac_mode , & run , & level , & final ) ;
pos += run + 1 ;
if ( pos > 63 ) {
return - 1 ;
}
level = ( level + 1 ) * w -> dquant ;
level += w -> qsum ;
sign = - get_bits1 ( & s -> gb ) ;
level = ( level ^ sign ) - sign ;
if ( use_quant_matrix ) {
level = ( level * quant_table [ pos ] ) >> 8 ;
}
s -> block [ 0 ] [ scantable [ pos ] ] = level ;
}
while ( ! final ) ;
s -> block_last_index [ 0 ] = pos ;
}
else {
s -> block_last_index [ 0 ] = 0 ;
if ( w -> flat_dc && ( ( unsigned ) ( dc_level + 1 ) ) < 3 ) {
int32_t divide_quant = ! chroma ? w -> divide_quant_dc_luma : w -> divide_quant_dc_chroma ;
int32_t dc_quant = ! chroma ? w -> quant : w -> quant_dc_chroma ;
dc_level += ( w -> predicted_dc * divide_quant + ( 1 << 12 ) ) >> 13 ;
dsp_x8_put_solidcolor ( av_clip_uint8 ( ( dc_level * dc_quant + 4 ) >> 3 ) , s -> dest [ chroma ] , s -> current_picture . f . linesize [ ! ! chroma ] ) ;
goto block_placed ;
}
zeros_only = ( dc_level == 0 ) ;
}
if ( ! chroma ) {
s -> block [ 0 ] [ 0 ] = dc_level * w -> quant ;
}
else {
s -> block [ 0 ] [ 0 ] = dc_level * w -> quant_dc_chroma ;
}
if ( ( unsigned int ) ( dc_level + 1 ) >= 3 && ( w -> edges & 3 ) != 3 ) {
int direction ;
direction = ( 0x6A017C >> ( w -> orient * 2 ) ) & 3 ;
if ( direction != 3 ) {
x8_ac_compensation ( w , direction , s -> block [ 0 ] [ 0 ] ) ;
}
}
if ( w -> flat_dc ) {
dsp_x8_put_solidcolor ( w -> predicted_dc , s -> dest [ chroma ] , s -> current_picture . f . linesize [ ! ! chroma ] ) ;
}
else {
w -> dsp . spatial_compensation [ w -> orient ] ( s -> edge_emu_buffer , s -> dest [ chroma ] , s -> current_picture . f . linesize [ ! ! chroma ] ) ;
}
if ( ! zeros_only ) s -> dsp . idct_add ( s -> dest [ chroma ] , s -> current_picture . f . linesize [ ! ! chroma ] , s -> block [ 0 ] ) ;
block_placed : if ( ! chroma ) {
x8_update_predictions ( w , w -> orient , n ) ;
}
if ( s -> loop_filter ) {
uint8_t * ptr = s -> dest [ chroma ] ;
int linesize = s -> current_picture . f . linesize [ ! ! chroma ] ;
if ( ! ( ( w -> edges & 2 ) || ( zeros_only && ( w -> orient | 4 ) == 4 ) ) ) {
w -> dsp . h_loop_filter ( ptr , linesize , w -> quant ) ;
}
if ( ! ( ( w -> edges & 1 ) || ( zeros_only && ( w -> orient | 8 ) == 8 ) ) ) {
w -> dsp . v_loop_filter ( ptr , linesize , w -> quant ) ;
}
}
return 0 ;
} | static int x8_decode_intra_mb ( IntraX8Context * const w , const int chroma ) {
MpegEncContext * const s = w -> s ;
uint8_t * scantable ;
int final , run , level ;
int ac_mode , dc_mode , est_run , dc_level ;
int pos , n ;
int zeros_only ;
int use_quant_matrix ;
int sign ;
assert ( w -> orient < 12 ) ;
s -> dsp . clear_block ( s -> block [ 0 ] ) ;
if ( chroma ) {
dc_mode = 2 ;
}
else {
dc_mode = ! ! w -> est_run ;
}
if ( x8_get_dc_rlf ( w , dc_mode , & dc_level , & final ) ) return - 1 ;
n = 0 ;
zeros_only = 0 ;
if ( ! final ) {
use_quant_matrix = w -> use_quant_matrix ;
if ( chroma ) {
ac_mode = 1 ;
est_run = 64 ;
}
else {
if ( w -> raw_orient < 3 ) {
use_quant_matrix = 0 ;
}
if ( w -> raw_orient > 4 ) {
ac_mode = 0 ;
est_run = 64 ;
}
else {
if ( w -> est_run > 1 ) {
ac_mode = 2 ;
est_run = w -> est_run ;
}
else {
ac_mode = 3 ;
est_run = 64 ;
}
}
}
x8_select_ac_table ( w , ac_mode ) ;
scantable = w -> scantable [ ( 0x928548 >> ( 2 * w -> orient ) ) & 3 ] . permutated ;
pos = 0 ;
do {
n ++ ;
if ( n >= est_run ) {
ac_mode = 3 ;
x8_select_ac_table ( w , 3 ) ;
}
x8_get_ac_rlf ( w , ac_mode , & run , & level , & final ) ;
pos += run + 1 ;
if ( pos > 63 ) {
return - 1 ;
}
level = ( level + 1 ) * w -> dquant ;
level += w -> qsum ;
sign = - get_bits1 ( & s -> gb ) ;
level = ( level ^ sign ) - sign ;
if ( use_quant_matrix ) {
level = ( level * quant_table [ pos ] ) >> 8 ;
}
s -> block [ 0 ] [ scantable [ pos ] ] = level ;
}
while ( ! final ) ;
s -> block_last_index [ 0 ] = pos ;
}
else {
s -> block_last_index [ 0 ] = 0 ;
if ( w -> flat_dc && ( ( unsigned ) ( dc_level + 1 ) ) < 3 ) {
int32_t divide_quant = ! chroma ? w -> divide_quant_dc_luma : w -> divide_quant_dc_chroma ;
int32_t dc_quant = ! chroma ? w -> quant : w -> quant_dc_chroma ;
dc_level += ( w -> predicted_dc * divide_quant + ( 1 << 12 ) ) >> 13 ;
dsp_x8_put_solidcolor ( av_clip_uint8 ( ( dc_level * dc_quant + 4 ) >> 3 ) , s -> dest [ chroma ] , s -> current_picture . f . linesize [ ! ! chroma ] ) ;
goto block_placed ;
}
zeros_only = ( dc_level == 0 ) ;
}
if ( ! chroma ) {
s -> block [ 0 ] [ 0 ] = dc_level * w -> quant ;
}
else {
s -> block [ 0 ] [ 0 ] = dc_level * w -> quant_dc_chroma ;
}
if ( ( unsigned int ) ( dc_level + 1 ) >= 3 && ( w -> edges & 3 ) != 3 ) {
int direction ;
direction = ( 0x6A017C >> ( w -> orient * 2 ) ) & 3 ;
if ( direction != 3 ) {
x8_ac_compensation ( w , direction , s -> block [ 0 ] [ 0 ] ) ;
}
}
if ( w -> flat_dc ) {
dsp_x8_put_solidcolor ( w -> predicted_dc , s -> dest [ chroma ] , s -> current_picture . f . linesize [ ! ! chroma ] ) ;
}
else {
w -> dsp . spatial_compensation [ w -> orient ] ( s -> edge_emu_buffer , s -> dest [ chroma ] , s -> current_picture . f . linesize [ ! ! chroma ] ) ;
}
if ( ! zeros_only ) s -> dsp . idct_add ( s -> dest [ chroma ] , s -> current_picture . f . linesize [ ! ! chroma ] , s -> block [ 0 ] ) ;
block_placed : if ( ! chroma ) {
x8_update_predictions ( w , w -> orient , n ) ;
}
if ( s -> loop_filter ) {
uint8_t * ptr = s -> dest [ chroma ] ;
int linesize = s -> current_picture . f . linesize [ ! ! chroma ] ;
if ( ! ( ( w -> edges & 2 ) || ( zeros_only && ( w -> orient | 4 ) == 4 ) ) ) {
w -> dsp . h_loop_filter ( ptr , linesize , w -> quant ) ;
}
if ( ! ( ( w -> edges & 1 ) || ( zeros_only && ( w -> orient | 8 ) == 8 ) ) ) {
w -> dsp . v_loop_filter ( ptr , linesize , w -> quant ) ;
}
}
return 0 ;
} | 1,411 |