target (int64, values 0-1) | func (string, lengths 7-484k) | func_no_comments (string, lengths 7-484k) | idx (int64, values 1-368k)
---|---|---|---|
1 | while(1) {
/* Add the Unix Domain Sockets to the list of read
* descriptors.
* rgerhards 2005-08-01: we must now check if there are
* any local sockets to listen to at all. If the -o option
* is given without -a, we do not need to listen at all..
*/
maxfds = 0;
FD_ZERO (&readfds);
/* Add the UDP listen sockets to the list of read descriptors.
*/
if(udpLstnSocks != NULL) {
for (i = 0; i < *udpLstnSocks; i++) {
if (udpLstnSocks[i+1] != -1) {
if(Debug)
net.debugListenInfo(udpLstnSocks[i+1], "UDP");
FD_SET(udpLstnSocks[i+1], &readfds);
if(udpLstnSocks[i+1]>maxfds) maxfds=udpLstnSocks[i+1];
}
}
}
if(Debug) {
dbgprintf("--------imUDP calling select, active file descriptors (max %d): ", maxfds);
for (nfds = 0; nfds <= maxfds; ++nfds)
if ( FD_ISSET(nfds, &readfds) )
dbgprintf("%d ", nfds);
dbgprintf("\n");
}
/* wait for io to become ready */
nfds = select(maxfds+1, (fd_set *) &readfds, NULL, NULL, NULL);
if(udpLstnSocks != NULL) {
for (i = 0; nfds && i < *udpLstnSocks; i++) {
if (FD_ISSET(udpLstnSocks[i+1], &readfds)) {
socklen = sizeof(frominet);
l = recvfrom(udpLstnSocks[i+1], (char*) pRcvBuf, MAXLINE - 1, 0,
(struct sockaddr *)&frominet, &socklen);
if (l > 0) {
if(net.cvthname(&frominet, fromHost, fromHostFQDN, fromHostIP) == RS_RET_OK) {
dbgprintf("Message from inetd socket: #%d, host: %s\n",
udpLstnSocks[i+1], fromHost);
/* Here we check if a host is permitted to send us
* syslog messages. If it isn't, we do not further
* process the message but log a warning (if we are
* configured to do this).
* rgerhards, 2005-09-26
*/
if(net.isAllowedSender((uchar*) "UDP",
(struct sockaddr *)&frominet, (char*)fromHostFQDN)) {
parseAndSubmitMessage(fromHost, fromHostIP, pRcvBuf, l,
MSG_PARSE_HOSTNAME, NOFLAG, eFLOWCTL_NO_DELAY);
} else {
dbgprintf("%s is not an allowed sender\n", (char*)fromHostFQDN);
if(glbl.GetOption_DisallowWarning) {
errmsg.LogError(0, NO_ERRCODE, "UDP message from disallowed sender %s discarded",
(char*)fromHost);
}
}
}
} else if (l < 0 && errno != EINTR && errno != EAGAIN) {
char errStr[1024];
rs_strerror_r(errno, errStr, sizeof(errStr));
dbgprintf("INET socket error: %d = %s.\n", errno, errStr);
errmsg.LogError(errno, NO_ERRCODE, "recvfrom inet");
/* should be harmless */
sleep(1);
}
--nfds; /* indicate we have processed one */
}
}
}
} | while(1) {
maxfds = 0;
FD_ZERO (&readfds);
if(udpLstnSocks != NULL) {
for (i = 0; i < *udpLstnSocks; i++) {
if (udpLstnSocks[i+1] != -1) {
if(Debug)
net.debugListenInfo(udpLstnSocks[i+1], "UDP");
FD_SET(udpLstnSocks[i+1], &readfds);
if(udpLstnSocks[i+1]>maxfds) maxfds=udpLstnSocks[i+1];
}
}
}
if(Debug) {
dbgprintf("--------imUDP calling select, active file descriptors (max %d): ", maxfds);
for (nfds = 0; nfds <= maxfds; ++nfds)
if ( FD_ISSET(nfds, &readfds) )
dbgprintf("%d ", nfds);
dbgprintf("\n");
}
nfds = select(maxfds+1, (fd_set *) &readfds, NULL, NULL, NULL);
if(udpLstnSocks != NULL) {
for (i = 0; nfds && i < *udpLstnSocks; i++) {
if (FD_ISSET(udpLstnSocks[i+1], &readfds)) {
socklen = sizeof(frominet);
l = recvfrom(udpLstnSocks[i+1], (char*) pRcvBuf, MAXLINE - 1, 0,
(struct sockaddr *)&frominet, &socklen);
if (l > 0) {
if(net.cvthname(&frominet, fromHost, fromHostFQDN, fromHostIP) == RS_RET_OK) {
dbgprintf("Message from inetd socket: #%d, host: %s\n",
udpLstnSocks[i+1], fromHost);
if(net.isAllowedSender((uchar*) "UDP",
(struct sockaddr *)&frominet, (char*)fromHostFQDN)) {
parseAndSubmitMessage(fromHost, fromHostIP, pRcvBuf, l,
MSG_PARSE_HOSTNAME, NOFLAG, eFLOWCTL_NO_DELAY);
} else {
dbgprintf("%s is not an allowed sender\n", (char*)fromHostFQDN);
if(glbl.GetOption_DisallowWarning) {
errmsg.LogError(0, NO_ERRCODE, "UDP message from disallowed sender %s discarded",
(char*)fromHost);
}
}
}
} else if (l < 0 && errno != EINTR && errno != EAGAIN) {
char errStr[1024];
rs_strerror_r(errno, errStr, sizeof(errStr));
dbgprintf("INET socket error: %d = %s.\n", errno, errStr);
errmsg.LogError(errno, NO_ERRCODE, "recvfrom inet");
sleep(1);
}
--nfds;
}
}
}
} | 1,155 |
1 | void __qdisc_run(struct net_device *dev)
{
do {
if (!qdisc_restart(dev))
break;
} while (!netif_queue_stopped(dev));
clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
} | void __qdisc_run(struct net_device *dev)
{
do {
if (!qdisc_restart(dev))
break;
} while (!netif_queue_stopped(dev));
clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
} | 1,156 |
0 | void initialize(const string &path, bool owner) {
TRACE_POINT();
this->path = path;
this->owner = owner;
/* Create the server instance directory. We only need to write to this
* directory for these reasons:
* 1. Initial population of structure files (structure_version.txt, instance.pid).
* 2. Creating/removing a generation directory.
* 3. Removing the entire server instance directory (after all
* generations are removed).
*
* 1 and 2 are done by the helper server during initialization and before lowering
* privilege. 3 is done during helper server shutdown by a cleanup process that's
* running as the same user the helper server was running as before privilege
* lowering.
* Therefore, we make the directory only writable by the user the helper server
* was running as before privilege is lowered. Everybody else has read and execute
* rights though, because we want admin tools to be able to list the available
* generations no matter what user they're running as.
*/
if (owner) {
switch (getFileType(path)) {
case FT_NONEXISTANT:
createDirectory(path);
break;
case FT_DIRECTORY:
removeDirTree(path);
createDirectory(path);
break;
default:
throw RuntimeException("'" + path + "' already exists, and is not a directory");
}
} else if (getFileType(path) != FT_DIRECTORY) {
throw RuntimeException("Server instance directory '" + path +
"' does not exist");
}
} | void initialize(const string &path, bool owner) {
TRACE_POINT();
this->path = path;
this->owner = owner;
if (owner) {
switch (getFileType(path)) {
case FT_NONEXISTANT:
createDirectory(path);
break;
case FT_DIRECTORY:
removeDirTree(path);
createDirectory(path);
break;
default:
throw RuntimeException("'" + path + "' already exists, and is not a directory");
}
} else if (getFileType(path) != FT_DIRECTORY) {
throw RuntimeException("Server instance directory '" + path +
"' does not exist");
}
} | 1,157 |
0 | static int dissect_pvfs_pdu ( tvbuff_t * tvb , packet_info * pinfo , proto_tree * tree , void * data _U_ ) {
dissect_pvfs_common ( tvb , pinfo , tree , FALSE ) ;
return tvb_reported_length ( tvb ) ;
} | static int dissect_pvfs_pdu ( tvbuff_t * tvb , packet_info * pinfo , proto_tree * tree , void * data _U_ ) {
dissect_pvfs_common ( tvb , pinfo , tree , FALSE ) ;
return tvb_reported_length ( tvb ) ;
} | 1,158 |
0 | int qemuMonitorTextAddDevice ( qemuMonitorPtr mon , const char * devicestr ) {
char * cmd = NULL ;
char * reply = NULL ;
char * safedev ;
int ret = - 1 ;
if ( ! ( safedev = qemuMonitorEscapeArg ( devicestr ) ) ) {
virReportOOMError ( ) ;
goto cleanup ;
}
if ( virAsprintf ( & cmd , "device_add %s" , safedev ) < 0 ) {
virReportOOMError ( ) ;
goto cleanup ;
}
if ( qemuMonitorHMPCommand ( mon , cmd , & reply ) < 0 ) {
qemuReportError ( VIR_ERR_OPERATION_FAILED , _ ( "cannot attach %s device" ) , devicestr ) ;
goto cleanup ;
}
if ( STRPREFIX ( reply , "husb: using" ) ) {
ret = 0 ;
goto cleanup ;
}
if ( STRNEQ ( reply , "" ) ) {
qemuReportError ( VIR_ERR_OPERATION_FAILED , _ ( "adding %s device failed: %s" ) , devicestr , reply ) ;
goto cleanup ;
}
ret = 0 ;
cleanup : VIR_FREE ( cmd ) ;
VIR_FREE ( reply ) ;
VIR_FREE ( safedev ) ;
return ret ;
} | int qemuMonitorTextAddDevice ( qemuMonitorPtr mon , const char * devicestr ) {
char * cmd = NULL ;
char * reply = NULL ;
char * safedev ;
int ret = - 1 ;
if ( ! ( safedev = qemuMonitorEscapeArg ( devicestr ) ) ) {
virReportOOMError ( ) ;
goto cleanup ;
}
if ( virAsprintf ( & cmd , "device_add %s" , safedev ) < 0 ) {
virReportOOMError ( ) ;
goto cleanup ;
}
if ( qemuMonitorHMPCommand ( mon , cmd , & reply ) < 0 ) {
qemuReportError ( VIR_ERR_OPERATION_FAILED , _ ( "cannot attach %s device" ) , devicestr ) ;
goto cleanup ;
}
if ( STRPREFIX ( reply , "husb: using" ) ) {
ret = 0 ;
goto cleanup ;
}
if ( STRNEQ ( reply , "" ) ) {
qemuReportError ( VIR_ERR_OPERATION_FAILED , _ ( "adding %s device failed: %s" ) , devicestr , reply ) ;
goto cleanup ;
}
ret = 0 ;
cleanup : VIR_FREE ( cmd ) ;
VIR_FREE ( reply ) ;
VIR_FREE ( safedev ) ;
return ret ;
} | 1,160 |
1 | void helper_rdmsr(void) { uint64_t val; helper_svm_check_intercept_param(SVM_EXIT_MSR, 0); switch((uint32_t)ECX) { case MSR_IA32_SYSENTER_CS: val = env->sysenter_cs; break; case MSR_IA32_SYSENTER_ESP: val = env->sysenter_esp; break; case MSR_IA32_SYSENTER_EIP: val = env->sysenter_eip; break; case MSR_IA32_APICBASE: val = cpu_get_apic_base(env); break; case MSR_EFER: val = env->efer; break; case MSR_STAR: val = env->star; break; case MSR_PAT: val = env->pat; break; case MSR_VM_HSAVE_PA: val = env->vm_hsave; break; case MSR_IA32_PERF_STATUS: /* tsc_increment_by_tick */ val = 1000ULL; /* CPU multiplier */ val |= (((uint64_t)4ULL) << 40); break; #ifdef TARGET_X86_64 case MSR_LSTAR: val = env->lstar; break; case MSR_CSTAR: val = env->cstar; break; case MSR_FMASK: val = env->fmask; break; case MSR_FSBASE: val = env->segs[R_FS].base; break; case MSR_GSBASE: val = env->segs[R_GS].base; break; case MSR_KERNELGSBASE: val = env->kernelgsbase; break; #endif #ifdef USE_KQEMU case MSR_QPI_COMMBASE: if (env->kqemu_enabled) { val = kqemu_comm_base; } else { val = 0; } break; #endif default: /* XXX: exception ? */ val = 0; break; } EAX = (uint32_t)(val); EDX = (uint32_t)(val >> 32); } | void helper_rdmsr(void) { uint64_t val; helper_svm_check_intercept_param(SVM_EXIT_MSR, 0); switch((uint32_t)ECX) { case MSR_IA32_SYSENTER_CS: val = env->sysenter_cs; break; case MSR_IA32_SYSENTER_ESP: val = env->sysenter_esp; break; case MSR_IA32_SYSENTER_EIP: val = env->sysenter_eip; break; case MSR_IA32_APICBASE: val = cpu_get_apic_base(env); break; case MSR_EFER: val = env->efer; break; case MSR_STAR: val = env->star; break; case MSR_PAT: val = env->pat; break; case MSR_VM_HSAVE_PA: val = env->vm_hsave; break; case MSR_IA32_PERF_STATUS: val = 1000ULL; val |= (((uint64_t)4ULL) << 40); break; #ifdef TARGET_X86_64 case MSR_LSTAR: val = env->lstar; break; case MSR_CSTAR: val = env->cstar; break; case MSR_FMASK: val = env->fmask; break; case MSR_FSBASE: val = env->segs[R_FS].base; break; case MSR_GSBASE: val = env->segs[R_GS].base; break; case MSR_KERNELGSBASE: val = env->kernelgsbase; break; #endif #ifdef USE_KQEMU case MSR_QPI_COMMBASE: if (env->kqemu_enabled) { val = kqemu_comm_base; } else { val = 0; } break; #endif default: val = 0; break; } EAX = (uint32_t)(val); EDX = (uint32_t)(val >> 32); } | 1,161 |
1 | void initialize(const string &path, bool owner) {
TRACE_POINT();
this->path = path;
this->owner = owner;
/* Create the server instance directory. We only need to write to this
* directory for these reasons:
* 1. Initial population of structure files (structure_version.txt, instance.pid).
* 2. Creating/removing a generation directory.
* 3. Removing the entire server instance directory (after all
* generations are removed).
*
* 1 and 2 are done by the helper server during initialization and before lowering
* privilege. 3 is done during helper server shutdown by a cleanup process that's
* running as the same user the helper server was running as before privilege
* lowering.
* Therefore, we make the directory only writable by the user the helper server
* was running as before privilege is lowered. Everybody else has read and execute
* rights though, because we want admin tools to be able to list the available
* generations no matter what user they're running as.
*/
makeDirTree(path, "u=rwx,g=rx,o=rx");
} | void initialize(const string &path, bool owner) {
TRACE_POINT();
this->path = path;
this->owner = owner;
makeDirTree(path, "u=rwx,g=rx,o=rx");
} | 1,162 |
1 | sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto gen_shutdown;
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Go a head and force a SACK, since we are shutting down. */
gen_shutdown:
/* Implementor's Guide.
*
* While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
* respond to each received packet containing one or more DATA chunk(s)
* with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
return SCTP_DISPOSITION_CONSUME;
} | sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto gen_shutdown;
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
gen_shutdown:
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
return SCTP_DISPOSITION_CONSUME;
} | 1,163 |
1 | cifs_find_smb_ses(struct TCP_Server_Info *server, char *username)
{
struct list_head *tmp;
struct cifsSesInfo *ses;
write_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
if (strncmp(ses->userName, username, MAX_USERNAME_SIZE))
continue;
++ses->ses_count;
write_unlock(&cifs_tcp_ses_lock);
return ses;
}
write_unlock(&cifs_tcp_ses_lock);
return NULL;
} | cifs_find_smb_ses(struct TCP_Server_Info *server, char *username)
{
struct list_head *tmp;
struct cifsSesInfo *ses;
write_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
if (strncmp(ses->userName, username, MAX_USERNAME_SIZE))
continue;
++ses->ses_count;
write_unlock(&cifs_tcp_ses_lock);
return ses;
}
write_unlock(&cifs_tcp_ses_lock);
return NULL;
} | 1,164 |
1 | BlockJobInfoList *qmp_query_block_jobs(Error **errp) { BlockJobInfoList *head = NULL, **p_next = &head; BlockDriverState *bs; BdrvNextIterator *it = NULL; while ((it = bdrv_next(it, &bs))) { AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); if (bs->job) { BlockJobInfoList *elem = g_new0(BlockJobInfoList, 1); elem->value = block_job_query(bs->job); *p_next = elem; p_next = &elem->next; } aio_context_release(aio_context); } return head; } | BlockJobInfoList *qmp_query_block_jobs(Error **errp) { BlockJobInfoList *head = NULL, **p_next = &head; BlockDriverState *bs; BdrvNextIterator *it = NULL; while ((it = bdrv_next(it, &bs))) { AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); if (bs->job) { BlockJobInfoList *elem = g_new0(BlockJobInfoList, 1); elem->value = block_job_query(bs->job); *p_next = elem; p_next = &elem->next; } aio_context_release(aio_context); } return head; } | 1,166 |
1 | sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Count this as receiving DATA. */
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* FIXME: For now send a SACK, but DATA processing may
* send another.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
return SCTP_DISPOSITION_CONSUME;
discard_noforce:
return SCTP_DISPOSITION_DISCARD;
} | sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __func__, tsn);
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
return SCTP_DISPOSITION_CONSUME;
discard_noforce:
return SCTP_DISPOSITION_DISCARD;
} | 1,167 |
1 | DeviceState *ssi_create_slave(SSIBus *bus, const char *name) { DeviceState *dev; dev = qdev_create(&bus->qbus, name); qdev_init(dev); return dev; } | DeviceState *ssi_create_slave(SSIBus *bus, const char *name) { DeviceState *dev; dev = qdev_create(&bus->qbus, name); qdev_init(dev); return dev; } | 1,169 |
1 | static struct task_struct *copy_process(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *child_tidptr,
struct pid *pid,
int trace)
{
int retval;
struct task_struct *p;
int cgroup_callbacks_done = 0;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
*/
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
return ERR_PTR(-EINVAL);
/*
* Shared signal handlers imply shared VM. By way of the above,
* thread groups also imply shared VM. Blocking this case allows
* for various simplifications in other code.
*/
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
return ERR_PTR(-EINVAL);
retval = security_task_create(clone_flags);
if (retval)
goto fork_out;
retval = -ENOMEM;
p = dup_task_struct(current);
if (!p)
goto fork_out;
rt_mutex_init_task(p);
#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
if (atomic_read(&p->real_cred->user->processes) >=
p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->real_cred->user != INIT_USER)
goto bad_fork_free;
}
retval = copy_creds(p, clone_flags);
if (retval < 0)
goto bad_fork_free;
/*
* If multiple threads are within copy_process(), then this check
* triggers too late. This doesn't hurt, the check is only there
* to stop root fork bombs.
*/
retval = -EAGAIN;
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
if (p->binfmt && !try_module_get(p->binfmt->module))
goto bad_fork_cleanup_put_domain;
p->did_exec = 0;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
#ifdef CONFIG_PREEMPT_RCU
p->rcu_read_lock_nesting = 0;
p->rcu_flipctr_idx = 0;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
clear_tsk_thread_flag(p, TIF_SIGPENDING);
init_sigpending(&p->pending);
p->utime = cputime_zero;
p->stime = cputime_zero;
p->gtime = cputime_zero;
p->utimescaled = cputime_zero;
p->stimescaled = cputime_zero;
p->prev_utime = cputime_zero;
p->prev_stime = cputime_zero;
p->default_timer_slack_ns = current->timer_slack_ns;
#ifdef CONFIG_DETECT_SOFTLOCKUP
p->last_switch_count = 0;
p->last_switch_timestamp = 0;
#endif
task_io_accounting_init(&p->ioac);
acct_clear_integrals(p);
posix_cpu_timers_init(p);
p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
p->real_start_time = p->start_time;
monotonic_to_bootbased(&p->real_start_time);
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
goto bad_fork_cleanup_cgroup;
}
mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
p->hardirqs_enabled = 1;
#else
p->hardirqs_enabled = 0;
#endif
p->hardirq_enable_ip = 0;
p->hardirq_enable_event = 0;
p->hardirq_disable_ip = _THIS_IP_;
p->hardirq_disable_event = 0;
p->softirqs_enabled = 1;
p->softirq_enable_ip = _THIS_IP_;
p->softirq_enable_event = 0;
p->softirq_disable_ip = 0;
p->softirq_disable_event = 0;
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
p->lockdep_recursion = 0;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
if (unlikely(current->ptrace))
ptrace_fork(p, clone_flags);
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
if ((retval = audit_alloc(p)))
goto bad_fork_cleanup_policy;
/* copy all the process information */
if ((retval = copy_semundo(clone_flags, p)))
goto bad_fork_cleanup_audit;
if ((retval = copy_files(clone_flags, p)))
goto bad_fork_cleanup_semundo;
if ((retval = copy_fs(clone_flags, p)))
goto bad_fork_cleanup_files;
if ((retval = copy_sighand(clone_flags, p)))
goto bad_fork_cleanup_fs;
if ((retval = copy_signal(clone_flags, p)))
goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
if ((retval = copy_namespaces(clone_flags, p)))
goto bad_fork_cleanup_mm;
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_io;
if (pid != &init_struct_pid) {
retval = -ENOMEM;
pid = alloc_pid(p->nsproxy->pid_ns);
if (!pid)
goto bad_fork_cleanup_io;
if (clone_flags & CLONE_NEWPID) {
retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
if (retval < 0)
goto bad_fork_free_pid;
}
}
ftrace_graph_init_task(p);
p->pid = pid_nr(pid);
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
if (current->nsproxy != p->nsproxy) {
retval = ns_cgroup_clone(p, pid);
if (retval)
goto bad_fork_free_graph;
}
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
/*
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
#endif
/*
* sigaltstack should be cleared when sharing the same VM
*/
if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
p->sas_ss_sp = p->sas_ss_size = 0;
/*
* Syscall tracing should be turned off in the child regardless
* of CLONE_PTRACE.
*/
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
clear_all_latency_tracing(p);
/* Our parent execution domain becomes current domain
These must match for thread signalling to apply */
p->parent_exec_id = p->self_exec_id;
/* ok, now we should be set up.. */
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
p->exit_state = 0;
/*
* Ok, make it visible to the rest of the system.
* We dont wake it up yet.
*/
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
/* Now that the task is set up, run cgroup callbacks if
* necessary. We need to run them before the task is visible
* on the tasklist. */
cgroup_fork_callbacks(p);
cgroup_callbacks_done = 1;
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
/*
* The task hasn't been attached yet, so its cpus_allowed mask will
* not be changed, nor will its assigned CPU.
*
* The cpus_allowed mask of the parent may have changed after it was
* copied first time - so re-copy it here, then check the child's CPU
* to ensure it is on a valid CPU (and if not, just force it back to
* parent's CPU). This avoids alot of nasty races.
*/
p->cpus_allowed = current->cpus_allowed;
p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
!cpu_online(task_cpu(p))))
set_task_cpu(p, smp_processor_id());
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
p->real_parent = current->real_parent;
else
p->real_parent = current;
spin_lock(&current->sighand->siglock);
/*
* Process group and session signals need to be delivered to just the
* parent before the fork or both the parent and the child after the
* fork. Restart if a signal comes in before we add the new process to
* it's process group.
* A fatal signal pending means that current will exit, so the new
* thread can't slip out of an OOM kill (or normal SIGKILL).
*/
recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_free_graph;
}
if (clone_flags & CLONE_THREAD) {
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
}
if (likely(p->pid)) {
list_add_tail(&p->sibling, &p->real_parent->children);
tracehook_finish_clone(p, clone_flags, trace);
if (thread_group_leader(p)) {
if (clone_flags & CLONE_NEWPID)
p->nsproxy->pid_ns->child_reaper = p;
p->signal->leader_pid = pid;
tty_kref_put(p->signal->tty);
p->signal->tty = tty_kref_get(current->signal->tty);
set_task_pgrp(p, task_pgrp_nr(current));
set_task_session(p, task_session_nr(current));
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
attach_pid(p, PIDTYPE_PID, pid);
nr_threads++;
}
total_forks++;
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
cgroup_post_fork(p);
return p;
bad_fork_free_graph:
ftrace_graph_exit_task(p);
bad_fork_free_pid:
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_io:
put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);
bad_fork_cleanup_signal:
cleanup_signal(p);
bad_fork_cleanup_sighand:
__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
exit_fs(p); /* blocking */
bad_fork_cleanup_files:
exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
exit_sem(p);
bad_fork_cleanup_audit:
audit_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
cgroup_exit(p, cgroup_callbacks_done);
delayacct_tsk_free(p);
if (p->binfmt)
module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
put_cred(p->real_cred);
put_cred(p->cred);
bad_fork_free:
free_task(p);
fork_out:
return ERR_PTR(retval);
} | static struct task_struct *copy_process(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *child_tidptr,
struct pid *pid,
int trace)
{
int retval;
struct task_struct *p;
int cgroup_callbacks_done = 0;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
return ERR_PTR(-EINVAL);
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
return ERR_PTR(-EINVAL);
retval = security_task_create(clone_flags);
if (retval)
goto fork_out;
retval = -ENOMEM;
p = dup_task_struct(current);
if (!p)
goto fork_out;
rt_mutex_init_task(p);
#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
if (atomic_read(&p->real_cred->user->processes) >=
p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->real_cred->user != INIT_USER)
goto bad_fork_free;
}
retval = copy_creds(p, clone_flags);
if (retval < 0)
goto bad_fork_free;
retval = -EAGAIN;
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
if (p->binfmt && !try_module_get(p->binfmt->module))
goto bad_fork_cleanup_put_domain;
p->did_exec = 0;
delayacct_tsk_init(p);
copy_flags(clone_flags, p);
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
#ifdef CONFIG_PREEMPT_RCU
p->rcu_read_lock_nesting = 0;
p->rcu_flipctr_idx = 0;
#endif
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
clear_tsk_thread_flag(p, TIF_SIGPENDING);
init_sigpending(&p->pending);
p->utime = cputime_zero;
p->stime = cputime_zero;
p->gtime = cputime_zero;
p->utimescaled = cputime_zero;
p->stimescaled = cputime_zero;
p->prev_utime = cputime_zero;
p->prev_stime = cputime_zero;
p->default_timer_slack_ns = current->timer_slack_ns;
#ifdef CONFIG_DETECT_SOFTLOCKUP
p->last_switch_count = 0;
p->last_switch_timestamp = 0;
#endif
task_io_accounting_init(&p->ioac);
acct_clear_integrals(p);
posix_cpu_timers_init(p);
p->lock_depth = -1;
do_posix_clock_monotonic_gettime(&p->start_time);
p->real_start_time = p->start_time;
monotonic_to_bootbased(&p->real_start_time);
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
goto bad_fork_cleanup_cgroup;
}
mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
p->hardirqs_enabled = 1;
#else
p->hardirqs_enabled = 0;
#endif
p->hardirq_enable_ip = 0;
p->hardirq_enable_event = 0;
p->hardirq_disable_ip = _THIS_IP_;
p->hardirq_disable_event = 0;
p->softirqs_enabled = 1;
p->softirq_enable_ip = _THIS_IP_;
p->softirq_enable_event = 0;
p->softirq_disable_ip = 0;
p->softirq_disable_event = 0;
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0;
p->curr_chain_key = 0;
p->lockdep_recursion = 0;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL;
#endif
if (unlikely(current->ptrace))
ptrace_fork(p, clone_flags);
sched_fork(p, clone_flags);
if ((retval = audit_alloc(p)))
goto bad_fork_cleanup_policy;
if ((retval = copy_semundo(clone_flags, p)))
goto bad_fork_cleanup_audit;
if ((retval = copy_files(clone_flags, p)))
goto bad_fork_cleanup_semundo;
if ((retval = copy_fs(clone_flags, p)))
goto bad_fork_cleanup_files;
if ((retval = copy_sighand(clone_flags, p)))
goto bad_fork_cleanup_fs;
if ((retval = copy_signal(clone_flags, p)))
goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
if ((retval = copy_namespaces(clone_flags, p)))
goto bad_fork_cleanup_mm;
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_io;
if (pid != &init_struct_pid) {
retval = -ENOMEM;
pid = alloc_pid(p->nsproxy->pid_ns);
if (!pid)
goto bad_fork_cleanup_io;
if (clone_flags & CLONE_NEWPID) {
retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
if (retval < 0)
goto bad_fork_free_pid;
}
}
ftrace_graph_init_task(p);
p->pid = pid_nr(pid);
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
if (current->nsproxy != p->nsproxy) {
retval = ns_cgroup_clone(p, pid);
if (retval)
goto bad_fork_free_graph;
}
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
#endif
if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
p->sas_ss_sp = p->sas_ss_size = 0;
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
clear_all_latency_tracing(p);
p->parent_exec_id = p->self_exec_id;
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
p->exit_state = 0;
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
cgroup_fork_callbacks(p);
cgroup_callbacks_done = 1;
write_lock_irq(&tasklist_lock);
p->cpus_allowed = current->cpus_allowed;
p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
!cpu_online(task_cpu(p))))
set_task_cpu(p, smp_processor_id());
if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
p->real_parent = current->real_parent;
else
p->real_parent = current;
spin_lock(&current->sighand->siglock);
recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_free_graph;
}
if (clone_flags & CLONE_THREAD) {
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
}
if (likely(p->pid)) {
list_add_tail(&p->sibling, &p->real_parent->children);
tracehook_finish_clone(p, clone_flags, trace);
if (thread_group_leader(p)) {
if (clone_flags & CLONE_NEWPID)
p->nsproxy->pid_ns->child_reaper = p;
p->signal->leader_pid = pid;
tty_kref_put(p->signal->tty);
p->signal->tty = tty_kref_get(current->signal->tty);
set_task_pgrp(p, task_pgrp_nr(current));
set_task_session(p, task_session_nr(current));
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
attach_pid(p, PIDTYPE_PID, pid);
nr_threads++;
}
total_forks++;
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
cgroup_post_fork(p);
return p;
bad_fork_free_graph:
ftrace_graph_exit_task(p);
bad_fork_free_pid:
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_io:
put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);
bad_fork_cleanup_signal:
cleanup_signal(p);
bad_fork_cleanup_sighand:
__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
exit_fs(p);
bad_fork_cleanup_files:
exit_files(p);
bad_fork_cleanup_semundo:
exit_sem(p);
bad_fork_cleanup_audit:
audit_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
cgroup_exit(p, cgroup_callbacks_done);
delayacct_tsk_free(p);
if (p->binfmt)
module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
put_cred(p->real_cred);
put_cred(p->cred);
bad_fork_free:
free_task(p);
fork_out:
return ERR_PTR(retval);
} | 1,170 |
1 | void initialize(const string &path, bool owner) {
TRACE_POINT();
this->path = path;
this->owner = owner;
/* Create the server instance directory. We only need to write to this
* directory for these reasons:
* 1. Initial population of structure files (structure_version.txt, instance.pid).
* 2. Creating/removing a generation directory.
* 3. Removing the entire server instance directory (after all
* generations are removed).
*
* 1 and 2 are done by the helper server during initialization and before lowering
* privilege. 3 is done during helper server shutdown by a cleanup process that's
* running as the same user the helper server was running as before privilege
* lowering.
* Therefore, we make the directory only writable by the user the helper server
* was running as before privilege is lowered. Everybody else has read and execute
* rights though, because we want admin tools to be able to list the available
* generations no matter what user they're running as.
*/
if (owner) {
switch (getFileTypeNoFollowSymlinks(path)) {
case FT_NONEXISTANT:
createDirectory(path);
break;
case FT_DIRECTORY:
verifyDirectoryPermissions(path);
break;
default:
throw RuntimeException("'" + path + "' already exists, and is not a directory");
}
} else if (getFileType(path) != FT_DIRECTORY) {
throw RuntimeException("Server instance directory '" + path +
"' does not exist");
}
} | void initialize(const string &path, bool owner) {
TRACE_POINT();
this->path = path;
this->owner = owner;
if (owner) {
switch (getFileTypeNoFollowSymlinks(path)) {
case FT_NONEXISTANT:
createDirectory(path);
break;
case FT_DIRECTORY:
verifyDirectoryPermissions(path);
break;
default:
throw RuntimeException("'" + path + "' already exists, and is not a directory");
}
} else if (getFileType(path) != FT_DIRECTORY) {
throw RuntimeException("Server instance directory '" + path +
"' does not exist");
}
} | 1,172 |
1 | cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
int rc = -ENOMEM, xid;
struct cifsSesInfo *ses;
xid = GetXid();
ses = cifs_find_smb_ses(server, volume_info->username);
if (ses) {
cFYI(1, "Existing smb sess found (status=%d)", ses->status);
/* existing SMB ses has a server reference already */
cifs_put_tcp_session(server);
mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(xid, ses);
if (rc) {
mutex_unlock(&ses->session_mutex);
/* problem -- put our ses reference */
cifs_put_smb_ses(ses);
FreeXid(xid);
return ERR_PTR(rc);
}
if (ses->need_reconnect) {
cFYI(1, "Session needs reconnect");
rc = cifs_setup_session(xid, ses,
volume_info->local_nls);
if (rc) {
mutex_unlock(&ses->session_mutex);
/* problem -- put our reference */
cifs_put_smb_ses(ses);
FreeXid(xid);
return ERR_PTR(rc);
}
}
mutex_unlock(&ses->session_mutex);
FreeXid(xid);
return ses;
}
cFYI(1, "Existing smb sess not found");
ses = sesInfoAlloc();
if (ses == NULL)
goto get_ses_fail;
/* new SMB session uses our server ref */
ses->server = server;
if (server->addr.sockAddr6.sin6_family == AF_INET6)
sprintf(ses->serverName, "%pI6",
&server->addr.sockAddr6.sin6_addr);
else
sprintf(ses->serverName, "%pI4",
&server->addr.sockAddr.sin_addr.s_addr);
if (volume_info->username)
strncpy(ses->userName, volume_info->username,
MAX_USERNAME_SIZE);
/* volume_info->password freed at unmount */
if (volume_info->password) {
ses->password = kstrdup(volume_info->password, GFP_KERNEL);
if (!ses->password)
goto get_ses_fail;
}
if (volume_info->domainname) {
int len = strlen(volume_info->domainname);
ses->domainName = kmalloc(len + 1, GFP_KERNEL);
if (ses->domainName)
strcpy(ses->domainName, volume_info->domainname);
}
ses->linux_uid = volume_info->linux_uid;
ses->overrideSecFlg = volume_info->secFlg;
mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(xid, ses);
if (!rc)
rc = cifs_setup_session(xid, ses, volume_info->local_nls);
mutex_unlock(&ses->session_mutex);
if (rc)
goto get_ses_fail;
/* success, put it on the list */
write_lock(&cifs_tcp_ses_lock);
list_add(&ses->smb_ses_list, &server->smb_ses_list);
write_unlock(&cifs_tcp_ses_lock);
FreeXid(xid);
return ses;
get_ses_fail:
sesInfoFree(ses);
FreeXid(xid);
return ERR_PTR(rc);
} | cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
int rc = -ENOMEM, xid;
struct cifsSesInfo *ses;
xid = GetXid();
ses = cifs_find_smb_ses(server, volume_info->username);
if (ses) {
cFYI(1, "Existing smb sess found (status=%d)", ses->status);
cifs_put_tcp_session(server);
mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(xid, ses);
if (rc) {
mutex_unlock(&ses->session_mutex);
cifs_put_smb_ses(ses);
FreeXid(xid);
return ERR_PTR(rc);
}
if (ses->need_reconnect) {
cFYI(1, "Session needs reconnect");
rc = cifs_setup_session(xid, ses,
volume_info->local_nls);
if (rc) {
mutex_unlock(&ses->session_mutex);
cifs_put_smb_ses(ses);
FreeXid(xid);
return ERR_PTR(rc);
}
}
mutex_unlock(&ses->session_mutex);
FreeXid(xid);
return ses;
}
cFYI(1, "Existing smb sess not found");
ses = sesInfoAlloc();
if (ses == NULL)
goto get_ses_fail;
ses->server = server;
if (server->addr.sockAddr6.sin6_family == AF_INET6)
sprintf(ses->serverName, "%pI6",
&server->addr.sockAddr6.sin6_addr);
else
sprintf(ses->serverName, "%pI4",
&server->addr.sockAddr.sin_addr.s_addr);
if (volume_info->username)
strncpy(ses->userName, volume_info->username,
MAX_USERNAME_SIZE);
if (volume_info->password) {
ses->password = kstrdup(volume_info->password, GFP_KERNEL);
if (!ses->password)
goto get_ses_fail;
}
if (volume_info->domainname) {
int len = strlen(volume_info->domainname);
ses->domainName = kmalloc(len + 1, GFP_KERNEL);
if (ses->domainName)
strcpy(ses->domainName, volume_info->domainname);
}
ses->linux_uid = volume_info->linux_uid;
ses->overrideSecFlg = volume_info->secFlg;
mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(xid, ses);
if (!rc)
rc = cifs_setup_session(xid, ses, volume_info->local_nls);
mutex_unlock(&ses->session_mutex);
if (rc)
goto get_ses_fail;
write_lock(&cifs_tcp_ses_lock);
list_add(&ses->smb_ses_list, &server->smb_ses_list);
write_unlock(&cifs_tcp_ses_lock);
FreeXid(xid);
return ses;
get_ses_fail:
sesInfoFree(ses);
FreeXid(xid);
return ERR_PTR(rc);
} | 1,173 |
0 | static void build_inter_predictors_for_planes ( MACROBLOCKD * xd , BLOCK_SIZE bsize , int mi_row , int mi_col , int plane_from , int plane_to ) {
int plane ;
const int mi_x = mi_col * MI_SIZE ;
const int mi_y = mi_row * MI_SIZE ;
for ( plane = plane_from ;
plane <= plane_to ;
++ plane ) {
const BLOCK_SIZE plane_bsize = get_plane_block_size ( bsize , & xd -> plane [ plane ] ) ;
const int num_4x4_w = num_4x4_blocks_wide_lookup [ plane_bsize ] ;
const int num_4x4_h = num_4x4_blocks_high_lookup [ plane_bsize ] ;
const int bw = 4 * num_4x4_w ;
const int bh = 4 * num_4x4_h ;
if ( xd -> mi [ 0 ] . src_mi -> mbmi . sb_type < BLOCK_8X8 ) {
int i = 0 , x , y ;
assert ( bsize == BLOCK_8X8 ) ;
for ( y = 0 ;
y < num_4x4_h ;
++ y ) for ( x = 0 ;
x < num_4x4_w ;
++ x ) build_inter_predictors ( xd , plane , i ++ , bw , bh , 4 * x , 4 * y , 4 , 4 , mi_x , mi_y ) ;
}
else {
build_inter_predictors ( xd , plane , 0 , bw , bh , 0 , 0 , bw , bh , mi_x , mi_y ) ;
}
}
} | static void build_inter_predictors_for_planes ( MACROBLOCKD * xd , BLOCK_SIZE bsize , int mi_row , int mi_col , int plane_from , int plane_to ) {
int plane ;
const int mi_x = mi_col * MI_SIZE ;
const int mi_y = mi_row * MI_SIZE ;
for ( plane = plane_from ;
plane <= plane_to ;
++ plane ) {
const BLOCK_SIZE plane_bsize = get_plane_block_size ( bsize , & xd -> plane [ plane ] ) ;
const int num_4x4_w = num_4x4_blocks_wide_lookup [ plane_bsize ] ;
const int num_4x4_h = num_4x4_blocks_high_lookup [ plane_bsize ] ;
const int bw = 4 * num_4x4_w ;
const int bh = 4 * num_4x4_h ;
if ( xd -> mi [ 0 ] . src_mi -> mbmi . sb_type < BLOCK_8X8 ) {
int i = 0 , x , y ;
assert ( bsize == BLOCK_8X8 ) ;
for ( y = 0 ;
y < num_4x4_h ;
++ y ) for ( x = 0 ;
x < num_4x4_w ;
++ x ) build_inter_predictors ( xd , plane , i ++ , bw , bh , 4 * x , 4 * y , 4 , 4 , mi_x , mi_y ) ;
}
else {
build_inter_predictors ( xd , plane , 0 , bw , bh , 0 , 0 , bw , bh , mi_x , mi_y ) ;
}
}
} | 1,174 |
1 | _AFmoduleinst _af_ms_adpcm_init_decompress (_Track *track, AFvirtualfile *fh,
bool seekok, bool headerless, AFframecount *chunkframes)
{
_AFmoduleinst ret = _AFnewmodinst(&ms_adpcm_decompress);
ms_adpcm_data *d;
AUpvlist pv;
long l;
void *v;
assert(af_ftell(fh) == track->fpos_first_frame);
d = (ms_adpcm_data *) _af_malloc(sizeof (ms_adpcm_data));
d->track = track;
d->fh = fh;
d->track->frames2ignore = 0;
d->track->fpos_next_frame = d->track->fpos_first_frame;
pv = d->track->f.compressionParams;
if (_af_pv_getlong(pv, _AF_MS_ADPCM_NUM_COEFFICIENTS, &l))
d->numCoefficients = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "number of coefficients not set");
if (_af_pv_getptr(pv, _AF_MS_ADPCM_COEFFICIENTS, &v))
memcpy(d->coefficients, v, sizeof (int16_t) * 256 * 2);
else
_af_error(AF_BAD_CODEC_CONFIG, "coefficient array not set");
if (_af_pv_getlong(pv, _AF_SAMPLES_PER_BLOCK, &l))
d->samplesPerBlock = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "samples per block not set");
if (_af_pv_getlong(pv, _AF_BLOCK_SIZE, &l))
d->blockAlign = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "block size not set");
*chunkframes = d->samplesPerBlock / d->track->f.channelCount;
ret.modspec = d;
return ret;
} | _AFmoduleinst _af_ms_adpcm_init_decompress (_Track *track, AFvirtualfile *fh,
bool seekok, bool headerless, AFframecount *chunkframes)
{
_AFmoduleinst ret = _AFnewmodinst(&ms_adpcm_decompress);
ms_adpcm_data *d;
AUpvlist pv;
long l;
void *v;
assert(af_ftell(fh) == track->fpos_first_frame);
d = (ms_adpcm_data *) _af_malloc(sizeof (ms_adpcm_data));
d->track = track;
d->fh = fh;
d->track->frames2ignore = 0;
d->track->fpos_next_frame = d->track->fpos_first_frame;
pv = d->track->f.compressionParams;
if (_af_pv_getlong(pv, _AF_MS_ADPCM_NUM_COEFFICIENTS, &l))
d->numCoefficients = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "number of coefficients not set");
if (_af_pv_getptr(pv, _AF_MS_ADPCM_COEFFICIENTS, &v))
memcpy(d->coefficients, v, sizeof (int16_t) * 256 * 2);
else
_af_error(AF_BAD_CODEC_CONFIG, "coefficient array not set");
if (_af_pv_getlong(pv, _AF_SAMPLES_PER_BLOCK, &l))
d->samplesPerBlock = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "samples per block not set");
if (_af_pv_getlong(pv, _AF_BLOCK_SIZE, &l))
d->blockAlign = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "block size not set");
*chunkframes = d->samplesPerBlock / d->track->f.channelCount;
ret.modspec = d;
return ret;
} | 1,175 |
0 | void verifyDirectoryPermissions(const string &path, struct stat &buf) {
TRACE_POINT();
if (buf.st_mode != (S_IFDIR | parseModeString("u=rwx,g=rx,o=rx"))) {
throw RuntimeException("Tried to reuse existing server instance directory " +
path + ", but it has wrong permissions");
} else if (buf.st_uid != geteuid() || buf.st_gid != getegid()) {
/* The server instance directory is always created by the Watchdog. Its UID/GID never
* changes because:
* 1. Disabling user switching only lowers the privilege of the HelperAgent.
* 2. For the UID/GID to change, the web server must be completely restarted
* (not just graceful reload) so that the control process can change its UID/GID.
* This causes the PID to change, so that an entirely new server instance
* directory is created.
*/
throw RuntimeException("Tried to reuse existing server instance directory " +
path + ", but it has wrong owner and group");
}
} | void verifyDirectoryPermissions(const string &path, struct stat &buf) {
TRACE_POINT();
if (buf.st_mode != (S_IFDIR | parseModeString("u=rwx,g=rx,o=rx"))) {
throw RuntimeException("Tried to reuse existing server instance directory " +
path + ", but it has wrong permissions");
} else if (buf.st_uid != geteuid() || buf.st_gid != getegid()) {
throw RuntimeException("Tried to reuse existing server instance directory " +
path + ", but it has wrong owner and group");
}
} | 1,176 |
1 | int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, const AVFrame *reference) { int len, nb_components, i, h, v, predictor, point_transform; int index, id, ret; const int block_size = s->lossless ? 1 : 8; int ilv, prev_shift; if (!s->got_picture) { av_log(s->avctx, AV_LOG_WARNING, "Can not process SOS before SOF, skipping\n"); return -1; } av_assert0(s->picture_ptr->data[0]); /* XXX: verify len field validity */ len = get_bits(&s->gb, 16); nb_components = get_bits(&s->gb, 8); if (nb_components == 0 || nb_components > MAX_COMPONENTS) { av_log(s->avctx, AV_LOG_ERROR, "decode_sos: nb_components (%d) unsupported\n", nb_components); return AVERROR_PATCHWELCOME; } if (len != 6 + 2 * nb_components) { av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len); return AVERROR_INVALIDDATA; } for (i = 0; i < nb_components; i++) { id = get_bits(&s->gb, 8) - 1; av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id); /* find component index */ for (index = 0; index < s->nb_components; index++) if (id == s->component_id[index]) break; if (index == s->nb_components) { av_log(s->avctx, AV_LOG_ERROR, "decode_sos: index(%d) out of components\n", index); return AVERROR_INVALIDDATA; } /* Metasoft MJPEG codec has Cb and Cr swapped */ if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J') && nb_components == 3 && s->nb_components == 3 && i) index = 3 - i; s->quant_sindex[i] = s->quant_index[index]; s->nb_blocks[i] = s->h_count[index] * s->v_count[index]; s->h_scount[i] = s->h_count[index]; s->v_scount[i] = s->v_count[index]; if(nb_components == 3 && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P) index = (i+2)%3; if(nb_components == 1 && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P) index = (index+2)%3; s->comp_index[i] = index; s->dc_index[i] = get_bits(&s->gb, 4); s->ac_index[i] = get_bits(&s->gb, 4); if (s->dc_index[i] < 0 || s->ac_index[i] < 0 || s->dc_index[i] >= 4 || s->ac_index[i] >= 4) goto out_of_range; if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table)) goto out_of_range; } predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */ ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */ if(s->avctx->codec_tag != AV_RL32("CJPG")){ prev_shift = get_bits(&s->gb, 4); /* Ah */ point_transform = get_bits(&s->gb, 4); /* Al */ }else prev_shift = point_transform = 0; if (nb_components > 1) { /* interleaved stream */ s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size); s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size); } else if (!s->ls) { /* skip this for JPEG-LS */ h = s->h_max / s->h_scount[0]; v = s->v_max / s->v_scount[0]; s->mb_width = (s->width + h * block_size - 1) / (h * block_size); s->mb_height = (s->height + v * block_size - 1) / (v * block_size); s->nb_blocks[0] = 1; s->h_scount[0] = 1; s->v_scount[0] = 1; } if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n", s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "", predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod, s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components); /* mjpeg-b can have padding bytes between sos and image data, skip them */ for (i = s->mjpb_skiptosod; i > 0; i--) skip_bits(&s->gb, 8); next_field: for (i = 0; i < nb_components; i++) s->last_dc[i] = (4 << s->bits); if (s->lossless) { av_assert0(s->picture_ptr == s->picture); if (CONFIG_JPEGLS_DECODER && s->ls) { // for () { // reset_ls_coding_parameters(s, 0); if ((ret = ff_jpegls_decode_picture(s, predictor, point_transform, ilv)) < 0) return ret; } else { if (s->rgb) { if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0) return ret; } else { if ((ret = ljpeg_decode_yuv_scan(s, predictor, point_transform, nb_components)) < 0) return ret; } } } else { if (s->progressive && predictor) { av_assert0(s->picture_ptr == s->picture); if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor, ilv, prev_shift, point_transform)) < 0) return ret; } else { if ((ret = mjpeg_decode_scan(s, nb_components, prev_shift, point_transform, mb_bitmask, reference)) < 0) return ret; } } if (s->interlaced && get_bits_left(&s->gb) > 32 && show_bits(&s->gb, 8) == 0xFF) { GetBitContext bak = s->gb; align_get_bits(&bak); if (show_bits(&bak, 16) == 0xFFD1) { av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n"); s->gb = bak; skip_bits(&s->gb, 16); s->bottom_field ^= 1; goto next_field; } } emms_c(); return 0; out_of_range: av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n"); return AVERROR_INVALIDDATA; } | int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, const AVFrame *reference) { int len, nb_components, i, h, v, predictor, point_transform; int index, id, ret; const int block_size = s->lossless ? 1 : 8; int ilv, prev_shift; if (!s->got_picture) { av_log(s->avctx, AV_LOG_WARNING, "Can not process SOS before SOF, skipping\n"); return -1; } av_assert0(s->picture_ptr->data[0]); len = get_bits(&s->gb, 16); nb_components = get_bits(&s->gb, 8); if (nb_components == 0 || nb_components > MAX_COMPONENTS) { av_log(s->avctx, AV_LOG_ERROR, "decode_sos: nb_components (%d) unsupported\n", nb_components); return AVERROR_PATCHWELCOME; } if (len != 6 + 2 * nb_components) { av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len); return AVERROR_INVALIDDATA; } for (i = 0; i < nb_components; i++) { id = get_bits(&s->gb, 8) - 1; av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id); for (index = 0; index < s->nb_components; index++) if (id == s->component_id[index]) break; if (index == s->nb_components) { av_log(s->avctx, AV_LOG_ERROR, "decode_sos: index(%d) out of components\n", index); return AVERROR_INVALIDDATA; } if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J') && nb_components == 3 && s->nb_components == 3 && i) index = 3 - i; s->quant_sindex[i] = s->quant_index[index]; s->nb_blocks[i] = s->h_count[index] * s->v_count[index]; s->h_scount[i] = s->h_count[index]; s->v_scount[i] = s->v_count[index]; if(nb_components == 3 && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P) index = (i+2)%3; if(nb_components == 1 && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P) index = (index+2)%3; s->comp_index[i] = index; s->dc_index[i] = get_bits(&s->gb, 4); s->ac_index[i] = get_bits(&s->gb, 4); if (s->dc_index[i] < 0 || s->ac_index[i] < 0 || s->dc_index[i] >= 4 || s->ac_index[i] >= 4) goto out_of_range; if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table)) goto out_of_range; } predictor = get_bits(&s->gb, 8); ilv = get_bits(&s->gb, 8); if(s->avctx->codec_tag != AV_RL32("CJPG")){ prev_shift = get_bits(&s->gb, 4); point_transform = get_bits(&s->gb, 4); }else prev_shift = point_transform = 0; if (nb_components > 1) { s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size); s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size); } else if (!s->ls) { h = s->h_max / s->h_scount[0]; v = s->v_max / s->v_scount[0]; s->mb_width = (s->width + h * block_size - 1) / (h * block_size); s->mb_height = (s->height + v * block_size - 1) / (v * block_size); s->nb_blocks[0] = 1; s->h_scount[0] = 1; s->v_scount[0] = 1; } if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n", s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "", predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod, s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components); for (i = s->mjpb_skiptosod; i > 0; i--) skip_bits(&s->gb, 8); next_field: for (i = 0; i < nb_components; i++) s->last_dc[i] = (4 << s->bits); if (s->lossless) { av_assert0(s->picture_ptr == s->picture); if (CONFIG_JPEGLS_DECODER && s->ls) { if ((ret = ff_jpegls_decode_picture(s, predictor, point_transform, ilv)) < 0) return ret; } else { if (s->rgb) { if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0) return ret; } else { if ((ret = ljpeg_decode_yuv_scan(s, predictor, point_transform, nb_components)) < 0) return ret; } } } else { if (s->progressive && predictor) { av_assert0(s->picture_ptr == s->picture); if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor, ilv, prev_shift, point_transform)) < 0) return ret; } else { if ((ret = mjpeg_decode_scan(s, nb_components, prev_shift, point_transform, mb_bitmask, reference)) < 0) return ret; } } if (s->interlaced && get_bits_left(&s->gb) > 32 && show_bits(&s->gb, 8) == 0xFF) { GetBitContext bak = s->gb; align_get_bits(&bak); if (show_bits(&bak, 16) == 0xFFD1) { av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n"); s->gb = bak; skip_bits(&s->gb, 16); s->bottom_field ^= 1; goto next_field; } } emms_c(); return 0; out_of_range: av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n"); return AVERROR_INVALIDDATA; } | 1,177 |
1 | static void ms_adpcm_run_pull (_AFmoduleinst *module)
{
ms_adpcm_data *d = (ms_adpcm_data *) module->modspec;
AFframecount frames2read = module->outc->nframes;
AFframecount nframes = 0;
int i, framesPerBlock, blockCount;
ssize_t blocksRead, bytesDecoded;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
assert(module->outc->nframes % framesPerBlock == 0);
blockCount = module->outc->nframes / framesPerBlock;
/* Read the compressed frames. */
blocksRead = af_fread(module->inc->buf, d->blockAlign, blockCount, d->fh);
/* Decompress into module->outc. */
for (i=0; i<blockCount; i++)
{
bytesDecoded = ms_adpcm_decode_block(d,
(uint8_t *) module->inc->buf + i * d->blockAlign,
(int16_t *) module->outc->buf + i * d->samplesPerBlock);
nframes += framesPerBlock;
}
d->track->nextfframe += nframes;
if (blocksRead > 0)
d->track->fpos_next_frame += blocksRead * d->blockAlign;
assert(af_ftell(d->fh) == d->track->fpos_next_frame);
/*
If we got EOF from read, then we return the actual amount read.
Complain only if there should have been more frames in the file.
*/
if (d->track->totalfframes != -1 && nframes != frames2read)
{
/* Report error if we haven't already */
if (d->track->filemodhappy)
{
_af_error(AF_BAD_READ,
"file missing data -- read %d frames, should be %d",
d->track->nextfframe,
d->track->totalfframes);
d->track->filemodhappy = AF_FALSE;
}
}
module->outc->nframes = nframes;
} | static void ms_adpcm_run_pull (_AFmoduleinst *module)
{
ms_adpcm_data *d = (ms_adpcm_data *) module->modspec;
AFframecount frames2read = module->outc->nframes;
AFframecount nframes = 0;
int i, framesPerBlock, blockCount;
ssize_t blocksRead, bytesDecoded;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
assert(module->outc->nframes % framesPerBlock == 0);
blockCount = module->outc->nframes / framesPerBlock;
blocksRead = af_fread(module->inc->buf, d->blockAlign, blockCount, d->fh);
for (i=0; i<blockCount; i++)
{
bytesDecoded = ms_adpcm_decode_block(d,
(uint8_t *) module->inc->buf + i * d->blockAlign,
(int16_t *) module->outc->buf + i * d->samplesPerBlock);
nframes += framesPerBlock;
}
d->track->nextfframe += nframes;
if (blocksRead > 0)
d->track->fpos_next_frame += blocksRead * d->blockAlign;
assert(af_ftell(d->fh) == d->track->fpos_next_frame);
if (d->track->totalfframes != -1 && nframes != frames2read)
{
if (d->track->filemodhappy)
{
_af_error(AF_BAD_READ,
"file missing data -- read %d frames, should be %d",
d->track->nextfframe,
d->track->totalfframes);
d->track->filemodhappy = AF_FALSE;
}
}
module->outc->nframes = nframes;
} | 1,179 |
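A note on row 1,179: the decode loop trusts blockCount even when af_fread came back short, so a truncated file leaves the tail of module->inc->buf stale while ms_adpcm_decode_block still consumes it, and nframes always equals frames2read, which keeps the totalfframes error branch from ever firing. A minimal guard, sketched here under the assumption that a short read should simply shrink the decode loop (this is not the library's actual patch):

    /* Sketch: decode only as many blocks as af_fread actually
       delivered.  blocksRead may be short near EOF or negative on
       error, so clamp blockCount before the decode loop runs. */
    if (blocksRead < 0)
        blocksRead = 0;
    if (blocksRead < blockCount)
        blockCount = blocksRead;

With the clamp, nframes reflects what was really decoded, so the existing totalfframes comparison reports truncated files instead of silently returning stale samples.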
1 | static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
{
atomic_sub(skb->truesize, &nf->mem);
kfree_skb(skb);
} | static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
{
atomic_sub(skb->truesize, &nf->mem);
kfree_skb(skb);
} | 1,181 |
1 | static void v9fs_renameat(void *opaque)
{
    ssize_t err = 0;
    size_t offset = 7;
    V9fsPDU *pdu = opaque;
    V9fsState *s = pdu->s;
    int32_t olddirfid, newdirfid;
    V9fsString old_name, new_name;
    v9fs_string_init(&old_name);
    v9fs_string_init(&new_name);
    err = pdu_unmarshal(pdu, offset, "dsds", &olddirfid, &old_name,
                        &newdirfid, &new_name);
    if (err < 0) {
        goto out_err;
    }
    if (name_is_illegal(old_name.data) || name_is_illegal(new_name.data)) {
        err = -ENOENT;
        goto out_err;
    }
    v9fs_path_write_lock(s);
    err = v9fs_complete_renameat(pdu, olddirfid, &old_name, newdirfid, &new_name);
    v9fs_path_unlock(s);
    if (!err) {
        err = offset;
    }
out_err:
    pdu_complete(pdu, err);
    v9fs_string_free(&old_name);
    v9fs_string_free(&new_name);
} | static void v9fs_renameat(void *opaque)
{
    ssize_t err = 0;
    size_t offset = 7;
    V9fsPDU *pdu = opaque;
    V9fsState *s = pdu->s;
    int32_t olddirfid, newdirfid;
    V9fsString old_name, new_name;
    v9fs_string_init(&old_name);
    v9fs_string_init(&new_name);
    err = pdu_unmarshal(pdu, offset, "dsds", &olddirfid, &old_name,
                        &newdirfid, &new_name);
    if (err < 0) {
        goto out_err;
    }
    if (name_is_illegal(old_name.data) || name_is_illegal(new_name.data)) {
        err = -ENOENT;
        goto out_err;
    }
    v9fs_path_write_lock(s);
    err = v9fs_complete_renameat(pdu, olddirfid, &old_name, newdirfid, &new_name);
    v9fs_path_unlock(s);
    if (!err) {
        err = offset;
    }
out_err:
    pdu_complete(pdu, err);
    v9fs_string_free(&old_name);
    v9fs_string_free(&new_name);
} | 1,182
0 | static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
struct frag_hdr *fhdr, int nhoff)
{
struct sk_buff *prev, *next;
struct net_device *dev;
int offset, end;
struct net *net = dev_net(skb_dst(skb)->dev);
if (fq->q.last_in & INET_FRAG_COMPLETE)
goto err;
offset = ntohs(fhdr->frag_off) & ~0x7;
end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
if ((unsigned int)end > IPV6_MAXPLEN) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
((u8 *)&fhdr->frag_off -
skb_network_header(skb)));
return -1;
}
if (skb->ip_summed == CHECKSUM_COMPLETE) {
const unsigned char *nh = skb_network_header(skb);
skb->csum = csum_sub(skb->csum,
csum_partial(nh, (u8 *)(fhdr + 1) - nh,
0));
}
/* Is this the final fragment? */
if (!(fhdr->frag_off & htons(IP6_MF))) {
/* If we already have some bits beyond end
* or have different end, the segment is corrupted.
*/
if (end < fq->q.len ||
((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
goto err;
fq->q.last_in |= INET_FRAG_LAST_IN;
fq->q.len = end;
} else {
/* Check if the fragment is rounded to 8 bytes.
* Required by the RFC.
*/
if (end & 0x7) {
/* RFC2460 says always send parameter problem in
* this case. -DaveM
*/
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
offsetof(struct ipv6hdr, payload_len));
return -1;
}
if (end > fq->q.len) {
/* Some bits beyond end -> corruption. */
if (fq->q.last_in & INET_FRAG_LAST_IN)
goto err;
fq->q.len = end;
}
}
if (end == offset)
goto err;
/* Point into the IP datagram 'data' part. */
if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
goto err;
if (pskb_trim_rcsum(skb, end - offset))
goto err;
/* Find out which fragments are in front and at the back of us
* in the chain of fragments so far. We must know where to put
* this fragment, right?
*/
prev = fq->q.fragments_tail;
if (!prev || FRAG6_CB(prev)->offset < offset) {
next = NULL;
goto found;
}
prev = NULL;
for(next = fq->q.fragments; next != NULL; next = next->next) {
if (FRAG6_CB(next)->offset >= offset)
break; /* bingo! */
prev = next;
}
found:
/* RFC5722, Section 4:
* When reassembling an IPv6 datagram, if
* one or more its constituent fragments is determined to be an
* overlapping fragment, the entire datagram (and any constituent
* fragments, including those not yet received) MUST be silently
* discarded.
*/
/* Check for overlap with preceding fragment. */
if (prev &&
(FRAG6_CB(prev)->offset + prev->len) - offset > 0)
goto discard_fq;
/* Look for overlap with succeeding segment. */
if (next && FRAG6_CB(next)->offset < end)
goto discard_fq;
FRAG6_CB(skb)->offset = offset;
/* Insert this fragment in the chain of fragments. */
skb->next = next;
if (!next)
fq->q.fragments_tail = skb;
if (prev)
prev->next = skb;
else
fq->q.fragments = skb;
dev = skb->dev;
if (dev) {
fq->iif = dev->ifindex;
skb->dev = NULL;
}
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
atomic_add(skb->truesize, &fq->q.net->mem);
/* The first fragment.
* nhoffset is obtained from the first fragment, of course.
*/
if (offset == 0) {
fq->nhoffset = nhoff;
fq->q.last_in |= INET_FRAG_FIRST_IN;
}
if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len)
return ip6_frag_reasm(fq, prev, dev);
write_lock(&ip6_frags.lock);
list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
write_unlock(&ip6_frags.lock);
return -1;
discard_fq:
fq_kill(fq);
err:
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return -1;
} | static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
struct frag_hdr *fhdr, int nhoff)
{
struct sk_buff *prev, *next;
struct net_device *dev;
int offset, end;
struct net *net = dev_net(skb_dst(skb)->dev);
if (fq->q.last_in & INET_FRAG_COMPLETE)
goto err;
offset = ntohs(fhdr->frag_off) & ~0x7;
end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
if ((unsigned int)end > IPV6_MAXPLEN) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
((u8 *)&fhdr->frag_off -
skb_network_header(skb)));
return -1;
}
if (skb->ip_summed == CHECKSUM_COMPLETE) {
const unsigned char *nh = skb_network_header(skb);
skb->csum = csum_sub(skb->csum,
csum_partial(nh, (u8 *)(fhdr + 1) - nh,
0));
}
if (!(fhdr->frag_off & htons(IP6_MF))) {
if (end < fq->q.len ||
((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
goto err;
fq->q.last_in |= INET_FRAG_LAST_IN;
fq->q.len = end;
} else {
if (end & 0x7) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
offsetof(struct ipv6hdr, payload_len));
return -1;
}
if (end > fq->q.len) {
if (fq->q.last_in & INET_FRAG_LAST_IN)
goto err;
fq->q.len = end;
}
}
if (end == offset)
goto err;
if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
goto err;
if (pskb_trim_rcsum(skb, end - offset))
goto err;
prev = fq->q.fragments_tail;
if (!prev || FRAG6_CB(prev)->offset < offset) {
next = NULL;
goto found;
}
prev = NULL;
for(next = fq->q.fragments; next != NULL; next = next->next) {
if (FRAG6_CB(next)->offset >= offset)
break;
prev = next;
}
found:
if (prev &&
(FRAG6_CB(prev)->offset + prev->len) - offset > 0)
goto discard_fq;
if (next && FRAG6_CB(next)->offset < end)
goto discard_fq;
FRAG6_CB(skb)->offset = offset;
skb->next = next;
if (!next)
fq->q.fragments_tail = skb;
if (prev)
prev->next = skb;
else
fq->q.fragments = skb;
dev = skb->dev;
if (dev) {
fq->iif = dev->ifindex;
skb->dev = NULL;
}
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
atomic_add(skb->truesize, &fq->q.net->mem);
if (offset == 0) {
fq->nhoffset = nhoff;
fq->q.last_in |= INET_FRAG_FIRST_IN;
}
if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len)
return ip6_frag_reasm(fq, prev, dev);
write_lock(&ip6_frags.lock);
list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
write_unlock(&ip6_frags.lock);
return -1;
discard_fq:
fq_kill(fq);
err:
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return -1;
} | 1,183 |
1 | getFileTypeNoFollowSymlinks(const StaticString &filename) {
struct stat buf;
int ret;
ret = lstat(filename.c_str(), &buf);
if (ret == 0) {
if (S_ISREG(buf.st_mode)) {
return FT_REGULAR;
} else if (S_ISDIR(buf.st_mode)) {
return FT_DIRECTORY;
} else if (S_ISLNK(buf.st_mode)) {
return FT_SYMLINK;
} else {
return FT_OTHER;
}
} else {
if (errno == ENOENT) {
return FT_NONEXISTANT;
} else {
int e = errno;
string message("Cannot lstat '");
message.append(filename);
message.append("'");
throw FileSystemException(message, e, filename);
}
}
} | getFileTypeNoFollowSymlinks(const StaticString &filename) {
struct stat buf;
int ret;
ret = lstat(filename.c_str(), &buf);
if (ret == 0) {
if (S_ISREG(buf.st_mode)) {
return FT_REGULAR;
} else if (S_ISDIR(buf.st_mode)) {
return FT_DIRECTORY;
} else if (S_ISLNK(buf.st_mode)) {
return FT_SYMLINK;
} else {
return FT_OTHER;
}
} else {
if (errno == ENOENT) {
return FT_NONEXISTANT;
} else {
int e = errno;
string message("Cannot lstat '");
message.append(filename);
message.append("'");
throw FileSystemException(message, e, filename);
}
}
} | 1,184 |
1 | static void ms_adpcm_reset2 (_AFmoduleinst *i)
{
ms_adpcm_data *d = (ms_adpcm_data *) i->modspec;
int framesPerBlock;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
d->track->fpos_next_frame = d->track->fpos_first_frame +
d->blockAlign * (d->track->nextfframe / framesPerBlock);
d->track->frames2ignore += d->framesToIgnore;
assert(d->track->nextfframe % framesPerBlock == 0);
} | static void ms_adpcm_reset2 (_AFmoduleinst *i)
{
ms_adpcm_data *d = (ms_adpcm_data *) i->modspec;
int framesPerBlock;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
d->track->fpos_next_frame = d->track->fpos_first_frame +
d->blockAlign * (d->track->nextfframe / framesPerBlock);
d->track->frames2ignore += d->framesToIgnore;
assert(d->track->nextfframe % framesPerBlock == 0);
} | 1,186 |
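The reset2 handlers in rows 1,186 and 1,190 reposition the stream by whole compressed blocks: reset1 rounds nextfframe down to a block boundary, so the byte offset is the first-frame position plus blockAlign times the block index. A self-contained check of that arithmetic with illustrative numbers (every constant below is invented for the example):

    #include <assert.h>

    int main(void)
    {
        long fpos_first_frame = 100;  /* illustrative: start of audio data */
        long blockAlign       = 256;  /* illustrative: bytes per block */
        long samplesPerBlock  = 500;  /* illustrative */
        long channelCount     = 1;
        long nextfframe       = 2000; /* already block-aligned by reset1 */

        long framesPerBlock = samplesPerBlock / channelCount;
        long fpos_next_frame = fpos_first_frame +
            blockAlign * (nextfframe / framesPerBlock);

        assert(nextfframe % framesPerBlock == 0);  /* 2000 = 4 * 500 */
        assert(fpos_next_frame == 100 + 4 * 256);  /* block 4 starts here */
        return 0;
    }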
0 | void ff_draw_horiz_band ( AVCodecContext * avctx , DSPContext * dsp , Picture * cur , Picture * last , int y , int h , int picture_structure , int first_field , int draw_edges , int low_delay , int v_edge_pos , int h_edge_pos ) {
const AVPixFmtDescriptor * desc = av_pix_fmt_desc_get ( avctx -> pix_fmt ) ;
int hshift = desc -> log2_chroma_w ;
int vshift = desc -> log2_chroma_h ;
const int field_pic = picture_structure != PICT_FRAME ;
if ( field_pic ) {
h <<= 1 ;
y <<= 1 ;
}
if ( ! avctx -> hwaccel && ! ( avctx -> codec -> capabilities & CODEC_CAP_HWACCEL_VDPAU ) && draw_edges && cur -> reference && ! ( avctx -> flags & CODEC_FLAG_EMU_EDGE ) ) {
int * linesize = cur -> f . linesize ;
int sides = 0 , edge_h ;
if ( y == 0 ) sides |= EDGE_TOP ;
if ( y + h >= v_edge_pos ) sides |= EDGE_BOTTOM ;
edge_h = FFMIN ( h , v_edge_pos - y ) ;
dsp -> draw_edges ( cur -> f . data [ 0 ] + y * linesize [ 0 ] , linesize [ 0 ] , h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides ) ;
dsp -> draw_edges ( cur -> f . data [ 1 ] + ( y >> vshift ) * linesize [ 1 ] , linesize [ 1 ] , h_edge_pos >> hshift , edge_h >> vshift , EDGE_WIDTH >> hshift , EDGE_WIDTH >> vshift , sides ) ;
dsp -> draw_edges ( cur -> f . data [ 2 ] + ( y >> vshift ) * linesize [ 2 ] , linesize [ 2 ] , h_edge_pos >> hshift , edge_h >> vshift , EDGE_WIDTH >> hshift , EDGE_WIDTH >> vshift , sides ) ;
}
h = FFMIN ( h , avctx -> height - y ) ;
if ( field_pic && first_field && ! ( avctx -> slice_flags & SLICE_FLAG_ALLOW_FIELD ) ) return ;
if ( avctx -> draw_horiz_band ) {
AVFrame * src ;
int offset [ AV_NUM_DATA_POINTERS ] ;
int i ;
if ( cur -> f . pict_type == AV_PICTURE_TYPE_B || low_delay || ( avctx -> slice_flags & SLICE_FLAG_CODED_ORDER ) ) src = & cur -> f ;
else if ( last ) src = & last -> f ;
else return ;
if ( cur -> f . pict_type == AV_PICTURE_TYPE_B && picture_structure == PICT_FRAME && avctx -> codec_id != AV_CODEC_ID_H264 && avctx -> codec_id != AV_CODEC_ID_SVQ3 ) {
for ( i = 0 ;
i < AV_NUM_DATA_POINTERS ;
i ++ ) offset [ i ] = 0 ;
}
else {
offset [ 0 ] = y * src -> linesize [ 0 ] ;
offset [ 1 ] = offset [ 2 ] = ( y >> vshift ) * src -> linesize [ 1 ] ;
for ( i = 3 ;
i < AV_NUM_DATA_POINTERS ;
i ++ ) offset [ i ] = 0 ;
}
emms_c ( ) ;
avctx -> draw_horiz_band ( avctx , src , offset , y , picture_structure , h ) ;
}
} | void ff_draw_horiz_band ( AVCodecContext * avctx , DSPContext * dsp , Picture * cur , Picture * last , int y , int h , int picture_structure , int first_field , int draw_edges , int low_delay , int v_edge_pos , int h_edge_pos ) {
const AVPixFmtDescriptor * desc = av_pix_fmt_desc_get ( avctx -> pix_fmt ) ;
int hshift = desc -> log2_chroma_w ;
int vshift = desc -> log2_chroma_h ;
const int field_pic = picture_structure != PICT_FRAME ;
if ( field_pic ) {
h <<= 1 ;
y <<= 1 ;
}
if ( ! avctx -> hwaccel && ! ( avctx -> codec -> capabilities & CODEC_CAP_HWACCEL_VDPAU ) && draw_edges && cur -> reference && ! ( avctx -> flags & CODEC_FLAG_EMU_EDGE ) ) {
int * linesize = cur -> f . linesize ;
int sides = 0 , edge_h ;
if ( y == 0 ) sides |= EDGE_TOP ;
if ( y + h >= v_edge_pos ) sides |= EDGE_BOTTOM ;
edge_h = FFMIN ( h , v_edge_pos - y ) ;
dsp -> draw_edges ( cur -> f . data [ 0 ] + y * linesize [ 0 ] , linesize [ 0 ] , h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides ) ;
dsp -> draw_edges ( cur -> f . data [ 1 ] + ( y >> vshift ) * linesize [ 1 ] , linesize [ 1 ] , h_edge_pos >> hshift , edge_h >> vshift , EDGE_WIDTH >> hshift , EDGE_WIDTH >> vshift , sides ) ;
dsp -> draw_edges ( cur -> f . data [ 2 ] + ( y >> vshift ) * linesize [ 2 ] , linesize [ 2 ] , h_edge_pos >> hshift , edge_h >> vshift , EDGE_WIDTH >> hshift , EDGE_WIDTH >> vshift , sides ) ;
}
h = FFMIN ( h , avctx -> height - y ) ;
if ( field_pic && first_field && ! ( avctx -> slice_flags & SLICE_FLAG_ALLOW_FIELD ) ) return ;
if ( avctx -> draw_horiz_band ) {
AVFrame * src ;
int offset [ AV_NUM_DATA_POINTERS ] ;
int i ;
if ( cur -> f . pict_type == AV_PICTURE_TYPE_B || low_delay || ( avctx -> slice_flags & SLICE_FLAG_CODED_ORDER ) ) src = & cur -> f ;
else if ( last ) src = & last -> f ;
else return ;
if ( cur -> f . pict_type == AV_PICTURE_TYPE_B && picture_structure == PICT_FRAME && avctx -> codec_id != AV_CODEC_ID_H264 && avctx -> codec_id != AV_CODEC_ID_SVQ3 ) {
for ( i = 0 ;
i < AV_NUM_DATA_POINTERS ;
i ++ ) offset [ i ] = 0 ;
}
else {
offset [ 0 ] = y * src -> linesize [ 0 ] ;
offset [ 1 ] = offset [ 2 ] = ( y >> vshift ) * src -> linesize [ 1 ] ;
for ( i = 3 ;
i < AV_NUM_DATA_POINTERS ;
i ++ ) offset [ i ] = 0 ;
}
emms_c ( ) ;
avctx -> draw_horiz_band ( avctx , src , offset , y , picture_structure , h ) ;
}
} | 1,187 |
1 | SpawnPreparationInfo prepareSpawn(const Options &options) {
TRACE_POINT();
SpawnPreparationInfo info;
prepareChroot(info, options);
info.userSwitching = prepareUserSwitching(options);
prepareSwitchingWorkingDirectory(info, options);
inferApplicationInfo(info);
return info;
} | SpawnPreparationInfo prepareSpawn(const Options &options) {
TRACE_POINT();
SpawnPreparationInfo info;
prepareChroot(info, options);
info.userSwitching = prepareUserSwitching(options);
prepareSwitchingWorkingDirectory(info, options);
inferApplicationInfo(info);
return info;
} | 1,188 |
0 | static inline void take_option ( char * * to , char * from , int * first , int len ) {
if ( ! * first ) {
* * to = ',' ;
* to += 1 ;
}
else * first = 0 ;
memcpy ( * to , from , len ) ;
* to += len ;
} | static inline void take_option ( char * * to , char * from , int * first , int len ) {
if ( ! * first ) {
* * to = ',' ;
* to += 1 ;
}
else * first = 0 ;
memcpy ( * to , from , len ) ;
* to += len ;
} | 1,189 |
1 | static void ima_adpcm_reset2 (_AFmoduleinst *i)
{
ima_adpcm_data *d = (ima_adpcm_data *) i->modspec;
int framesPerBlock;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
d->track->fpos_next_frame = d->track->fpos_first_frame +
d->blockAlign * (d->track->nextfframe / framesPerBlock);
d->track->frames2ignore += d->framesToIgnore;
assert(d->track->nextfframe % framesPerBlock == 0);
} | static void ima_adpcm_reset2 (_AFmoduleinst *i)
{
ima_adpcm_data *d = (ima_adpcm_data *) i->modspec;
int framesPerBlock;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
d->track->fpos_next_frame = d->track->fpos_first_frame +
d->blockAlign * (d->track->nextfframe / framesPerBlock);
d->track->frames2ignore += d->framesToIgnore;
assert(d->track->nextfframe % framesPerBlock == 0);
} | 1,190 |
1 | static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
struct frag_hdr *fhdr, int nhoff)
{
struct sk_buff *prev, *next;
struct net_device *dev;
int offset, end;
struct net *net = dev_net(skb_dst(skb)->dev);
if (fq->q.last_in & INET_FRAG_COMPLETE)
goto err;
offset = ntohs(fhdr->frag_off) & ~0x7;
end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
if ((unsigned int)end > IPV6_MAXPLEN) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
((u8 *)&fhdr->frag_off -
skb_network_header(skb)));
return -1;
}
if (skb->ip_summed == CHECKSUM_COMPLETE) {
const unsigned char *nh = skb_network_header(skb);
skb->csum = csum_sub(skb->csum,
csum_partial(nh, (u8 *)(fhdr + 1) - nh,
0));
}
/* Is this the final fragment? */
if (!(fhdr->frag_off & htons(IP6_MF))) {
/* If we already have some bits beyond end
* or have different end, the segment is corrupted.
*/
if (end < fq->q.len ||
((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
goto err;
fq->q.last_in |= INET_FRAG_LAST_IN;
fq->q.len = end;
} else {
/* Check if the fragment is rounded to 8 bytes.
* Required by the RFC.
*/
if (end & 0x7) {
/* RFC2460 says always send parameter problem in
* this case. -DaveM
*/
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
offsetof(struct ipv6hdr, payload_len));
return -1;
}
if (end > fq->q.len) {
/* Some bits beyond end -> corruption. */
if (fq->q.last_in & INET_FRAG_LAST_IN)
goto err;
fq->q.len = end;
}
}
if (end == offset)
goto err;
/* Point into the IP datagram 'data' part. */
if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
goto err;
if (pskb_trim_rcsum(skb, end - offset))
goto err;
/* Find out which fragments are in front and at the back of us
* in the chain of fragments so far. We must know where to put
* this fragment, right?
*/
prev = fq->q.fragments_tail;
if (!prev || FRAG6_CB(prev)->offset < offset) {
next = NULL;
goto found;
}
prev = NULL;
for(next = fq->q.fragments; next != NULL; next = next->next) {
if (FRAG6_CB(next)->offset >= offset)
break; /* bingo! */
prev = next;
}
found:
/* We found where to put this one. Check for overlap with
* preceding fragment, and, if needed, align things so that
* any overlaps are eliminated.
*/
if (prev) {
int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
if (i > 0) {
offset += i;
if (end <= offset)
goto err;
if (!pskb_pull(skb, i))
goto err;
if (skb->ip_summed != CHECKSUM_UNNECESSARY)
skb->ip_summed = CHECKSUM_NONE;
}
}
/* Look for overlap with succeeding segments.
* If we can merge fragments, do it.
*/
while (next && FRAG6_CB(next)->offset < end) {
int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */
if (i < next->len) {
/* Eat head of the next overlapped fragment
* and leave the loop. The next ones cannot overlap.
*/
if (!pskb_pull(next, i))
goto err;
FRAG6_CB(next)->offset += i; /* next fragment */
fq->q.meat -= i;
if (next->ip_summed != CHECKSUM_UNNECESSARY)
next->ip_summed = CHECKSUM_NONE;
break;
} else {
struct sk_buff *free_it = next;
/* Old fragment is completely overridden with
* new one drop it.
*/
next = next->next;
if (prev)
prev->next = next;
else
fq->q.fragments = next;
fq->q.meat -= free_it->len;
frag_kfree_skb(fq->q.net, free_it);
}
}
FRAG6_CB(skb)->offset = offset;
/* Insert this fragment in the chain of fragments. */
skb->next = next;
if (!next)
fq->q.fragments_tail = skb;
if (prev)
prev->next = skb;
else
fq->q.fragments = skb;
dev = skb->dev;
if (dev) {
fq->iif = dev->ifindex;
skb->dev = NULL;
}
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
atomic_add(skb->truesize, &fq->q.net->mem);
/* The first fragment.
* nhoffset is obtained from the first fragment, of course.
*/
if (offset == 0) {
fq->nhoffset = nhoff;
fq->q.last_in |= INET_FRAG_FIRST_IN;
}
if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len)
return ip6_frag_reasm(fq, prev, dev);
write_lock(&ip6_frags.lock);
list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
write_unlock(&ip6_frags.lock);
return -1;
err:
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return -1;
} | static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
struct frag_hdr *fhdr, int nhoff)
{
struct sk_buff *prev, *next;
struct net_device *dev;
int offset, end;
struct net *net = dev_net(skb_dst(skb)->dev);
if (fq->q.last_in & INET_FRAG_COMPLETE)
goto err;
offset = ntohs(fhdr->frag_off) & ~0x7;
end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
if ((unsigned int)end > IPV6_MAXPLEN) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
((u8 *)&fhdr->frag_off -
skb_network_header(skb)));
return -1;
}
if (skb->ip_summed == CHECKSUM_COMPLETE) {
const unsigned char *nh = skb_network_header(skb);
skb->csum = csum_sub(skb->csum,
csum_partial(nh, (u8 *)(fhdr + 1) - nh,
0));
}
if (!(fhdr->frag_off & htons(IP6_MF))) {
if (end < fq->q.len ||
((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
goto err;
fq->q.last_in |= INET_FRAG_LAST_IN;
fq->q.len = end;
} else {
if (end & 0x7) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
offsetof(struct ipv6hdr, payload_len));
return -1;
}
if (end > fq->q.len) {
if (fq->q.last_in & INET_FRAG_LAST_IN)
goto err;
fq->q.len = end;
}
}
if (end == offset)
goto err;
if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
goto err;
if (pskb_trim_rcsum(skb, end - offset))
goto err;
prev = fq->q.fragments_tail;
if (!prev || FRAG6_CB(prev)->offset < offset) {
next = NULL;
goto found;
}
prev = NULL;
for(next = fq->q.fragments; next != NULL; next = next->next) {
if (FRAG6_CB(next)->offset >= offset)
break;
prev = next;
}
found:
if (prev) {
int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
if (i > 0) {
offset += i;
if (end <= offset)
goto err;
if (!pskb_pull(skb, i))
goto err;
if (skb->ip_summed != CHECKSUM_UNNECESSARY)
skb->ip_summed = CHECKSUM_NONE;
}
}
while (next && FRAG6_CB(next)->offset < end) {
int i = end - FRAG6_CB(next)->offset;
if (i < next->len) {
if (!pskb_pull(next, i))
goto err;
FRAG6_CB(next)->offset += i;
fq->q.meat -= i;
if (next->ip_summed != CHECKSUM_UNNECESSARY)
next->ip_summed = CHECKSUM_NONE;
break;
} else {
struct sk_buff *free_it = next;
next = next->next;
if (prev)
prev->next = next;
else
fq->q.fragments = next;
fq->q.meat -= free_it->len;
frag_kfree_skb(fq->q.net, free_it);
}
}
FRAG6_CB(skb)->offset = offset;
skb->next = next;
if (!next)
fq->q.fragments_tail = skb;
if (prev)
prev->next = skb;
else
fq->q.fragments = skb;
dev = skb->dev;
if (dev) {
fq->iif = dev->ifindex;
skb->dev = NULL;
}
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
atomic_add(skb->truesize, &fq->q.net->mem);
if (offset == 0) {
fq->nhoffset = nhoff;
fq->q.last_in |= INET_FRAG_FIRST_IN;
}
if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len)
return ip6_frag_reasm(fq, prev, dev);
write_lock(&ip6_frags.lock);
list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
write_unlock(&ip6_frags.lock);
return -1;
err:
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return -1;
} | 1,191 |
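Rows 1,183 and 1,191 are the same ip6_frag_queue at two points in its history. The version just above (labeled 1) trims and merges overlapping IPv6 fragments in place; the version at idx 1,183 (labeled 0) discards the whole reassembly queue on any overlap, the behaviour RFC 5722 mandates. The decisive hunk is the pair of tests after the found: label, quoted from row 1,183 for side-by-side reading:

    /* Row 1,183: any overlap with the previous or the next fragment
       aborts reassembly of the entire datagram. */
    if (prev &&
        (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
        goto discard_fq;
    if (next && FRAG6_CB(next)->offset < end)
        goto discard_fq;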
1 | static void mips_cpu_realizefn(DeviceState *dev, Error **errp)
{
    MIPSCPU *cpu = MIPS_CPU(dev);
    MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(dev);
    cpu_reset(CPU(cpu));
    mcc->parent_realize(dev, errp);
} | static void mips_cpu_realizefn(DeviceState *dev, Error **errp)
{
    MIPSCPU *cpu = MIPS_CPU(dev);
    MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(dev);
    cpu_reset(CPU(cpu));
    mcc->parent_realize(dev, errp);
} | 1,192
0 | int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
{
    int ret = 0;
    if (av_strstart(p, "pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,", &p)) {
        AVIOContext pb;
        RTSPState *rt = s->priv_data;
        AVDictionary *opts = NULL;
        int len = strlen(p) * 6 / 8;
        char *buf = av_mallocz(len);
        av_base64_decode(buf, p, len);
        if (rtp_asf_fix_header(buf, len) < 0)
            av_log(s, AV_LOG_ERROR, "Failed to fix invalid RTSP-MS/ASF min_pktsize\n");
        init_packetizer(&pb, buf, len);
        if (rt->asf_ctx) {
            avformat_close_input(&rt->asf_ctx);
        }
        if (!(rt->asf_ctx = avformat_alloc_context()))
            return AVERROR(ENOMEM);
        rt->asf_ctx->pb = &pb;
        av_dict_set(&opts, "no_resync_search", "1", 0);
        ret = avformat_open_input(&rt->asf_ctx, "", &ff_asf_demuxer, &opts);
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
        av_dict_copy(&s->metadata, rt->asf_ctx->metadata, 0);
        rt->asf_pb_pos = avio_tell(&pb);
        av_free(buf);
        rt->asf_ctx->pb = NULL;
    }
    return ret;
} | int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
{
    int ret = 0;
    if (av_strstart(p, "pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,", &p)) {
        AVIOContext pb;
        RTSPState *rt = s->priv_data;
        AVDictionary *opts = NULL;
        int len = strlen(p) * 6 / 8;
        char *buf = av_mallocz(len);
        av_base64_decode(buf, p, len);
        if (rtp_asf_fix_header(buf, len) < 0)
            av_log(s, AV_LOG_ERROR, "Failed to fix invalid RTSP-MS/ASF min_pktsize\n");
        init_packetizer(&pb, buf, len);
        if (rt->asf_ctx) {
            avformat_close_input(&rt->asf_ctx);
        }
        if (!(rt->asf_ctx = avformat_alloc_context()))
            return AVERROR(ENOMEM);
        rt->asf_ctx->pb = &pb;
        av_dict_set(&opts, "no_resync_search", "1", 0);
        ret = avformat_open_input(&rt->asf_ctx, "", &ff_asf_demuxer, &opts);
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
        av_dict_copy(&s->metadata, rt->asf_ctx->metadata, 0);
        rt->asf_pb_pos = avio_tell(&pb);
        av_free(buf);
        rt->asf_ctx->pb = NULL;
    }
    return ret;
} | 1,193
0 | static void main_external_compression_cleanup ( void ) {
int i ;
for ( i = 0 ;
i < num_subprocs ;
i += 1 ) {
if ( ! ext_subprocs [ i ] ) {
continue ;
}
kill ( ext_subprocs [ i ] , SIGTERM ) ;
ext_subprocs [ i ] = 0 ;
}
} | static void main_external_compression_cleanup ( void ) {
int i ;
for ( i = 0 ;
i < num_subprocs ;
i += 1 ) {
if ( ! ext_subprocs [ i ] ) {
continue ;
}
kill ( ext_subprocs [ i ] , SIGTERM ) ;
ext_subprocs [ i ] = 0 ;
}
} | 1,194 |
1 | void _af_adpcm_decoder (uint8_t *indata, int16_t *outdata, int len,
struct adpcm_state *state)
{
uint8_t *inp; /* Input buffer pointer */
int16_t *outp; /* output buffer pointer */
int sign; /* Current adpcm sign bit */
int delta; /* Current adpcm output value */
int step; /* Stepsize */
int valpred; /* Predicted value */
int vpdiff; /* Current change to valpred */
int index; /* Current step change index */
int inputbuffer; /* place to keep next 4-bit value */
int bufferstep; /* toggle between inputbuffer/input */
outp = outdata;
inp = indata;
valpred = state->valprev;
index = state->index;
step = stepsizeTable[index];
bufferstep = 0;
for ( ; len > 0 ; len-- ) {
/* Step 1 - get the delta value */
if ( bufferstep ) {
delta = (inputbuffer >> 4) & 0xf;
} else {
inputbuffer = *inp++;
delta = inputbuffer & 0xf;
}
bufferstep = !bufferstep;
/* Step 2 - Find new index value (for later) */
index += indexTable[delta];
if ( index < 0 ) index = 0;
if ( index > 88 ) index = 88;
/* Step 3 - Separate sign and magnitude */
sign = delta & 8;
delta = delta & 7;
/* Step 4 - Compute difference and new predicted value */
/*
** Computes 'vpdiff = (delta+0.5)*step/4', but see comment
** in adpcm_coder.
*/
vpdiff = step >> 3;
if ( delta & 4 ) vpdiff += step;
if ( delta & 2 ) vpdiff += step>>1;
if ( delta & 1 ) vpdiff += step>>2;
if ( sign )
valpred -= vpdiff;
else
valpred += vpdiff;
/* Step 5 - clamp output value */
if ( valpred > 32767 )
valpred = 32767;
else if ( valpred < -32768 )
valpred = -32768;
/* Step 6 - Update step value */
step = stepsizeTable[index];
/* Step 7 - Output value */
*outp++ = valpred;
}
state->valprev = valpred;
state->index = index;
} | void _af_adpcm_decoder (uint8_t *indata, int16_t *outdata, int len,
struct adpcm_state *state)
{
uint8_t *inp;
int16_t *outp;
int sign;
int delta;
int step;
int valpred;
int vpdiff;
int index;
int inputbuffer;
int bufferstep;
outp = outdata;
inp = indata;
valpred = state->valprev;
index = state->index;
step = stepsizeTable[index];
bufferstep = 0;
for ( ; len > 0 ; len-- ) {
if ( bufferstep ) {
delta = (inputbuffer >> 4) & 0xf;
} else {
inputbuffer = *inp++;
delta = inputbuffer & 0xf;
}
bufferstep = !bufferstep;
index += indexTable[delta];
if ( index < 0 ) index = 0;
if ( index > 88 ) index = 88;
sign = delta & 8;
delta = delta & 7;
vpdiff = step >> 3;
if ( delta & 4 ) vpdiff += step;
if ( delta & 2 ) vpdiff += step>>1;
if ( delta & 1 ) vpdiff += step>>2;
if ( sign )
valpred -= vpdiff;
else
valpred += vpdiff;
if ( valpred > 32767 )
valpred = 32767;
else if ( valpred < -32768 )
valpred = -32768;
step = stepsizeTable[index];
*outp++ = valpred;
}
state->valprev = valpred;
state->index = index;
} | 1,195 |
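Step 4 of row 1,195 evaluates vpdiff = (delta + 0.5) * step / 4 without a multiply: each bit of the 3-bit magnitude contributes a shifted copy of step, and step >> 3 supplies the 0.5 term. A standalone comparison against the idealized formula; because truncation happens per term, small deviations from the real-valued result are expected:

    #include <stdio.h>

    /* Per-term shift-add form used by the decoder above. */
    static int vpdiff_shift_add(int delta, int step)
    {
        int vpdiff = step >> 3;              /* the 0.5 term: step/8 */
        if (delta & 4) vpdiff += step;       /* 4 * (step/4) */
        if (delta & 2) vpdiff += step >> 1;  /* 2 * (step/4) */
        if (delta & 1) vpdiff += step >> 2;  /* 1 * (step/4) */
        return vpdiff;
    }

    int main(void)
    {
        for (int delta = 0; delta < 8; delta++) {
            int step = 16;
            printf("delta=%d shift-add=%d ideal=%.1f\n", delta,
                   vpdiff_shift_add(delta, step), (delta + 0.5) * step / 4.0);
        }
        return 0;
    }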
0 | void *Type_MLU_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag)
{
cmsMLU* mlu;
cmsUInt32Number Count, RecLen, NumOfWchar;
cmsUInt32Number SizeOfHeader;
cmsUInt32Number Len, Offset;
cmsUInt32Number i;
wchar_t* Block;
cmsUInt32Number BeginOfThisString, EndOfThisString, LargestPosition;
*nItems = 0;
if (!_cmsReadUInt32Number(io, &Count)) return NULL;
if (!_cmsReadUInt32Number(io, &RecLen)) return NULL;
if (RecLen != 12) {
cmsSignalError(self->ContextID, cmsERROR_UNKNOWN_EXTENSION, "multiLocalizedUnicodeType of len != 12 is not supported.");
return NULL;
}
mlu = cmsMLUalloc(self ->ContextID, Count);
if (mlu == NULL) return NULL;
mlu ->UsedEntries = Count;
SizeOfHeader = 12 * Count + sizeof(_cmsTagBase);
LargestPosition = 0;
for (i=0; i < Count; i++) {
if (!_cmsReadUInt16Number(io, &mlu ->Entries[i].Language)) goto Error;
if (!_cmsReadUInt16Number(io, &mlu ->Entries[i].Country)) goto Error;
// Now deal with Len and offset.
if (!_cmsReadUInt32Number(io, &Len)) goto Error;
if (!_cmsReadUInt32Number(io, &Offset)) goto Error;
// Check for overflow
if (Offset < (SizeOfHeader + 8)) goto Error;
if ((Offset + Len) > SizeOfTag + 8) goto Error;
// True begin of the string
BeginOfThisString = Offset - SizeOfHeader - 8;
// Ajust to wchar_t elements
mlu ->Entries[i].Len = (Len * sizeof(wchar_t)) / sizeof(cmsUInt16Number);
mlu ->Entries[i].StrW = (BeginOfThisString * sizeof(wchar_t)) / sizeof(cmsUInt16Number);
// To guess maximum size, add offset + len
EndOfThisString = BeginOfThisString + Len;
if (EndOfThisString > LargestPosition)
LargestPosition = EndOfThisString;
}
// Now read the remaining of tag and fill all strings. Subtract the directory
SizeOfTag = (LargestPosition * sizeof(wchar_t)) / sizeof(cmsUInt16Number);
if (SizeOfTag == 0)
{
Block = NULL;
NumOfWchar = 0;
}
else
{
Block = (wchar_t*) _cmsMalloc(self ->ContextID, SizeOfTag);
if (Block == NULL) goto Error;
NumOfWchar = SizeOfTag / sizeof(wchar_t);
if (!_cmsReadWCharArray(io, NumOfWchar, Block)) goto Error;
}
mlu ->MemPool = Block;
mlu ->PoolSize = SizeOfTag;
mlu ->PoolUsed = SizeOfTag;
*nItems = 1;
return (void*) mlu;
Error:
if (mlu) cmsMLUfree(mlu);
return NULL;
} | void *Type_MLU_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag)
{
cmsMLU* mlu;
cmsUInt32Number Count, RecLen, NumOfWchar;
cmsUInt32Number SizeOfHeader;
cmsUInt32Number Len, Offset;
cmsUInt32Number i;
wchar_t* Block;
cmsUInt32Number BeginOfThisString, EndOfThisString, LargestPosition;
*nItems = 0;
if (!_cmsReadUInt32Number(io, &Count)) return NULL;
if (!_cmsReadUInt32Number(io, &RecLen)) return NULL;
if (RecLen != 12) {
cmsSignalError(self->ContextID, cmsERROR_UNKNOWN_EXTENSION, "multiLocalizedUnicodeType of len != 12 is not supported.");
return NULL;
}
mlu = cmsMLUalloc(self ->ContextID, Count);
if (mlu == NULL) return NULL;
mlu ->UsedEntries = Count;
SizeOfHeader = 12 * Count + sizeof(_cmsTagBase);
LargestPosition = 0;
for (i=0; i < Count; i++) {
if (!_cmsReadUInt16Number(io, &mlu ->Entries[i].Language)) goto Error;
if (!_cmsReadUInt16Number(io, &mlu ->Entries[i].Country)) goto Error;
if (!_cmsReadUInt32Number(io, &Len)) goto Error;
if (!_cmsReadUInt32Number(io, &Offset)) goto Error;
if (Offset < (SizeOfHeader + 8)) goto Error;
if ((Offset + Len) > SizeOfTag + 8) goto Error;
BeginOfThisString = Offset - SizeOfHeader - 8;
mlu ->Entries[i].Len = (Len * sizeof(wchar_t)) / sizeof(cmsUInt16Number);
mlu ->Entries[i].StrW = (BeginOfThisString * sizeof(wchar_t)) / sizeof(cmsUInt16Number);
EndOfThisString = BeginOfThisString + Len;
if (EndOfThisString > LargestPosition)
LargestPosition = EndOfThisString;
}
SizeOfTag = (LargestPosition * sizeof(wchar_t)) / sizeof(cmsUInt16Number);
if (SizeOfTag == 0)
{
Block = NULL;
NumOfWchar = 0;
}
else
{
Block = (wchar_t*) _cmsMalloc(self ->ContextID, SizeOfTag);
if (Block == NULL) goto Error;
NumOfWchar = SizeOfTag / sizeof(wchar_t);
if (!_cmsReadWCharArray(io, NumOfWchar, Block)) goto Error;
}
mlu ->MemPool = Block;
mlu ->PoolSize = SizeOfTag;
mlu ->PoolUsed = SizeOfTag;
*nItems = 1;
return (void*) mlu;
Error:
if (mlu) cmsMLUfree(mlu);
return NULL;
} | 1,196 |
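Row 1,196 pins every string record inside the tag with two guards, Offset < SizeOfHeader + 8 and Offset + Len > SizeOfTag + 8, before BeginOfThisString is derived from the offset. When writing such guards from scratch, a form whose addition cannot wrap an unsigned type is often preferred; a generic sketch (range_ok is a hypothetical helper, not lcms API):

    /* cmsUInt32Number is a 32-bit unsigned int in lcms2; the typedef is
       repeated here only to keep the sketch self-contained. */
    typedef unsigned int cmsUInt32Number;

    /* Containment check for [offset, offset + len) within total bytes,
       phrased so the addition cannot wrap. */
    static int range_ok(cmsUInt32Number offset, cmsUInt32Number len,
                        cmsUInt32Number total)
    {
        return offset <= total && len <= total - offset;
    }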
1 | static void rd_use_partition ( VP9_COMP * cpi , const TileInfo * const tile , MODE_INFO * * mi_8x8 , TOKENEXTRA * * tp , int mi_row , int mi_col , BLOCK_SIZE bsize , int * rate , int64_t * dist , int do_recon , PC_TREE * pc_tree ) {
VP9_COMMON * const cm = & cpi -> common ;
MACROBLOCK * const x = & cpi -> mb ;
MACROBLOCKD * const xd = & x -> e_mbd ;
const int mis = cm -> mi_stride ;
const int bsl = b_width_log2 ( bsize ) ;
const int mi_step = num_4x4_blocks_wide_lookup [ bsize ] / 2 ;
const int bss = ( 1 << bsl ) / 4 ;
int i , pl ;
PARTITION_TYPE partition = PARTITION_NONE ;
BLOCK_SIZE subsize ;
ENTROPY_CONTEXT l [ 16 * MAX_MB_PLANE ] , a [ 16 * MAX_MB_PLANE ] ;
PARTITION_CONTEXT sl [ 8 ] , sa [ 8 ] ;
int last_part_rate = INT_MAX ;
int64_t last_part_dist = INT64_MAX ;
int64_t last_part_rd = INT64_MAX ;
int none_rate = INT_MAX ;
int64_t none_dist = INT64_MAX ;
int64_t none_rd = INT64_MAX ;
int chosen_rate = INT_MAX ;
int64_t chosen_dist = INT64_MAX ;
int64_t chosen_rd = INT64_MAX ;
BLOCK_SIZE sub_subsize = BLOCK_4X4 ;
int splits_below = 0 ;
BLOCK_SIZE bs_type = mi_8x8 [ 0 ] -> mbmi . sb_type ;
int do_partition_search = 1 ;
PICK_MODE_CONTEXT * ctx = & pc_tree -> none ;
if ( mi_row >= cm -> mi_rows || mi_col >= cm -> mi_cols ) return ;
assert ( num_4x4_blocks_wide_lookup [ bsize ] == num_4x4_blocks_high_lookup [ bsize ] ) ;
partition = partition_lookup [ bsl ] [ bs_type ] ;
subsize = get_subsize ( bsize , partition ) ;
pc_tree -> partitioning = partition ;
save_context ( cpi , mi_row , mi_col , a , l , sa , sl , bsize ) ;
if ( bsize == BLOCK_16X16 && cpi -> oxcf . aq_mode ) {
set_offsets ( cpi , tile , mi_row , mi_col , bsize ) ;
x -> mb_energy = vp9_block_energy ( cpi , x , bsize ) ;
}
if ( do_partition_search && cpi -> sf . partition_search_type == SEARCH_PARTITION && cpi -> sf . adjust_partitioning_from_last_frame ) {
if ( partition == PARTITION_SPLIT && subsize > BLOCK_8X8 ) {
sub_subsize = get_subsize ( subsize , PARTITION_SPLIT ) ;
splits_below = 1 ;
for ( i = 0 ;
i < 4 ;
i ++ ) {
int jj = i >> 1 , ii = i & 0x01 ;
MODE_INFO * this_mi = mi_8x8 [ jj * bss * mis + ii * bss ] ;
if ( this_mi && this_mi -> mbmi . sb_type >= sub_subsize ) {
splits_below = 0 ;
}
}
}
if ( partition != PARTITION_NONE && ! splits_below && mi_row + ( mi_step >> 1 ) < cm -> mi_rows && mi_col + ( mi_step >> 1 ) < cm -> mi_cols ) {
pc_tree -> partitioning = PARTITION_NONE ;
rd_pick_sb_modes ( cpi , tile , mi_row , mi_col , & none_rate , & none_dist , bsize , ctx , INT64_MAX , 0 ) ;
pl = partition_plane_context ( xd , mi_row , mi_col , bsize ) ;
if ( none_rate < INT_MAX ) {
none_rate += cpi -> partition_cost [ pl ] [ PARTITION_NONE ] ;
none_rd = RDCOST ( x -> rdmult , x -> rddiv , none_rate , none_dist ) ;
}
restore_context ( cpi , mi_row , mi_col , a , l , sa , sl , bsize ) ;
mi_8x8 [ 0 ] -> mbmi . sb_type = bs_type ;
pc_tree -> partitioning = partition ;
}
}
switch ( partition ) {
case PARTITION_NONE : rd_pick_sb_modes ( cpi , tile , mi_row , mi_col , & last_part_rate , & last_part_dist , bsize , ctx , INT64_MAX , 0 ) ;
break ;
case PARTITION_HORZ : rd_pick_sb_modes ( cpi , tile , mi_row , mi_col , & last_part_rate , & last_part_dist , subsize , & pc_tree -> horizontal [ 0 ] , INT64_MAX , 0 ) ;
if ( last_part_rate != INT_MAX && bsize >= BLOCK_8X8 && mi_row + ( mi_step >> 1 ) < cm -> mi_rows ) {
int rt = 0 ;
int64_t dt = 0 ;
PICK_MODE_CONTEXT * ctx = & pc_tree -> horizontal [ 0 ] ;
update_state ( cpi , ctx , mi_row , mi_col , subsize , 0 ) ;
encode_superblock ( cpi , tp , 0 , mi_row , mi_col , subsize , ctx ) ;
rd_pick_sb_modes ( cpi , tile , mi_row + ( mi_step >> 1 ) , mi_col , & rt , & dt , subsize , & pc_tree -> horizontal [ 1 ] , INT64_MAX , 1 ) ;
if ( rt == INT_MAX || dt == INT64_MAX ) {
last_part_rate = INT_MAX ;
last_part_dist = INT64_MAX ;
break ;
}
last_part_rate += rt ;
last_part_dist += dt ;
}
break ;
case PARTITION_VERT : rd_pick_sb_modes ( cpi , tile , mi_row , mi_col , & last_part_rate , & last_part_dist , subsize , & pc_tree -> vertical [ 0 ] , INT64_MAX , 0 ) ;
if ( last_part_rate != INT_MAX && bsize >= BLOCK_8X8 && mi_col + ( mi_step >> 1 ) < cm -> mi_cols ) {
int rt = 0 ;
int64_t dt = 0 ;
PICK_MODE_CONTEXT * ctx = & pc_tree -> vertical [ 0 ] ;
update_state ( cpi , ctx , mi_row , mi_col , subsize , 0 ) ;
encode_superblock ( cpi , tp , 0 , mi_row , mi_col , subsize , ctx ) ;
rd_pick_sb_modes ( cpi , tile , mi_row , mi_col + ( mi_step >> 1 ) , & rt , & dt , subsize , & pc_tree -> vertical [ bsize > BLOCK_8X8 ] , INT64_MAX , 1 ) ;
if ( rt == INT_MAX || dt == INT64_MAX ) {
last_part_rate = INT_MAX ;
last_part_dist = INT64_MAX ;
break ;
}
last_part_rate += rt ;
last_part_dist += dt ;
}
break ;
case PARTITION_SPLIT : if ( bsize == BLOCK_8X8 ) {
rd_pick_sb_modes ( cpi , tile , mi_row , mi_col , & last_part_rate , & last_part_dist , subsize , pc_tree -> leaf_split [ 0 ] , INT64_MAX , 0 ) ;
break ;
}
last_part_rate = 0 ;
last_part_dist = 0 ;
for ( i = 0 ;
i < 4 ;
i ++ ) {
int x_idx = ( i & 1 ) * ( mi_step >> 1 ) ;
int y_idx = ( i >> 1 ) * ( mi_step >> 1 ) ;
int jj = i >> 1 , ii = i & 0x01 ;
int rt ;
int64_t dt ;
if ( ( mi_row + y_idx >= cm -> mi_rows ) || ( mi_col + x_idx >= cm -> mi_cols ) ) continue ;
rd_use_partition ( cpi , tile , mi_8x8 + jj * bss * mis + ii * bss , tp , mi_row + y_idx , mi_col + x_idx , subsize , & rt , & dt , i != 3 , pc_tree -> split [ i ] ) ;
if ( rt == INT_MAX || dt == INT64_MAX ) {
last_part_rate = INT_MAX ;
last_part_dist = INT64_MAX ;
break ;
}
last_part_rate += rt ;
last_part_dist += dt ;
}
break ;
default : assert ( 0 ) ;
break ;
}
pl = partition_plane_context ( xd , mi_row , mi_col , bsize ) ;
if ( last_part_rate < INT_MAX ) {
last_part_rate += cpi -> partition_cost [ pl ] [ partition ] ;
last_part_rd = RDCOST ( x -> rdmult , x -> rddiv , last_part_rate , last_part_dist ) ;
}
if ( do_partition_search && cpi -> sf . adjust_partitioning_from_last_frame && cpi -> sf . partition_search_type == SEARCH_PARTITION && partition != PARTITION_SPLIT && bsize > BLOCK_8X8 && ( mi_row + mi_step < cm -> mi_rows || mi_row + ( mi_step >> 1 ) == cm -> mi_rows ) && ( mi_col + mi_step < cm -> mi_cols || mi_col + ( mi_step >> 1 ) == cm -> mi_cols ) ) {
BLOCK_SIZE split_subsize = get_subsize ( bsize , PARTITION_SPLIT ) ;
chosen_rate = 0 ;
chosen_dist = 0 ;
restore_context ( cpi , mi_row , mi_col , a , l , sa , sl , bsize ) ;
pc_tree -> partitioning = PARTITION_SPLIT ;
for ( i = 0 ;
i < 4 ;
i ++ ) {
int x_idx = ( i & 1 ) * ( mi_step >> 1 ) ;
int y_idx = ( i >> 1 ) * ( mi_step >> 1 ) ;
int rt = 0 ;
int64_t dt = 0 ;
ENTROPY_CONTEXT l [ 16 * MAX_MB_PLANE ] , a [ 16 * MAX_MB_PLANE ] ;
PARTITION_CONTEXT sl [ 8 ] , sa [ 8 ] ;
if ( ( mi_row + y_idx >= cm -> mi_rows ) || ( mi_col + x_idx >= cm -> mi_cols ) ) continue ;
save_context ( cpi , mi_row , mi_col , a , l , sa , sl , bsize ) ;
pc_tree -> split [ i ] -> partitioning = PARTITION_NONE ;
rd_pick_sb_modes ( cpi , tile , mi_row + y_idx , mi_col + x_idx , & rt , & dt , split_subsize , & pc_tree -> split [ i ] -> none , INT64_MAX , i ) ;
restore_context ( cpi , mi_row , mi_col , a , l , sa , sl , bsize ) ;
if ( rt == INT_MAX || dt == INT64_MAX ) {
chosen_rate = INT_MAX ;
chosen_dist = INT64_MAX ;
break ;
}
chosen_rate += rt ;
chosen_dist += dt ;
if ( i != 3 ) encode_sb ( cpi , tile , tp , mi_row + y_idx , mi_col + x_idx , 0 , split_subsize , pc_tree -> split [ i ] ) ;
pl = partition_plane_context ( xd , mi_row + y_idx , mi_col + x_idx , split_subsize ) ;
chosen_rate += cpi -> partition_cost [ pl ] [ PARTITION_NONE ] ;
}
pl = partition_plane_context ( xd , mi_row , mi_col , bsize ) ;
if ( chosen_rate < INT_MAX ) {
chosen_rate += cpi -> partition_cost [ pl ] [ PARTITION_SPLIT ] ;
chosen_rd = RDCOST ( x -> rdmult , x -> rddiv , chosen_rate , chosen_dist ) ;
}
}
if ( last_part_rd < chosen_rd ) {
mi_8x8 [ 0 ] -> mbmi . sb_type = bsize ;
if ( bsize >= BLOCK_8X8 ) pc_tree -> partitioning = partition ;
chosen_rate = last_part_rate ;
chosen_dist = last_part_dist ;
chosen_rd = last_part_rd ;
}
if ( none_rd < chosen_rd ) {
if ( bsize >= BLOCK_8X8 ) pc_tree -> partitioning = PARTITION_NONE ;
chosen_rate = none_rate ;
chosen_dist = none_dist ;
}
restore_context ( cpi , mi_row , mi_col , a , l , sa , sl , bsize ) ;
if ( bsize == BLOCK_64X64 ) assert ( chosen_rate < INT_MAX && chosen_dist < INT64_MAX ) ;
if ( do_recon ) {
int output_enabled = ( bsize == BLOCK_64X64 ) ;
if ( ( cpi -> oxcf . aq_mode == COMPLEXITY_AQ ) && cm -> seg . update_map ) {
vp9_select_in_frame_q_segment ( cpi , mi_row , mi_col , output_enabled , chosen_rate ) ;
}
if ( cpi -> oxcf . aq_mode == CYCLIC_REFRESH_AQ ) vp9_cyclic_refresh_set_rate_and_dist_sb ( cpi -> cyclic_refresh , chosen_rate , chosen_dist ) ;
encode_sb ( cpi , tile , tp , mi_row , mi_col , output_enabled , bsize , pc_tree ) ;
}
* rate = chosen_rate ;
* dist = chosen_dist ;
} | static void rd_use_partition ( VP9_COMP * cpi , const TileInfo * const tile , MODE_INFO * * mi_8x8 , TOKENEXTRA * * tp , int mi_row , int mi_col , BLOCK_SIZE bsize , int * rate , int64_t * dist , int do_recon , PC_TREE * pc_tree ) {
VP9_COMMON * const cm = & cpi -> common ;
MACROBLOCK * const x = & cpi -> mb ;
MACROBLOCKD * const xd = & x -> e_mbd ;
const int mis = cm -> mi_stride ;
const int bsl = b_width_log2 ( bsize ) ;
const int mi_step = num_4x4_blocks_wide_lookup [ bsize ] / 2 ;
const int bss = ( 1 << bsl ) / 4 ;
int i , pl ;
PARTITION_TYPE partition = PARTITION_NONE ;
BLOCK_SIZE subsize ;
ENTROPY_CONTEXT l [ 16 * MAX_MB_PLANE ] , a [ 16 * MAX_MB_PLANE ] ;
PARTITION_CONTEXT sl [ 8 ] , sa [ 8 ] ;
int last_part_rate = INT_MAX ;
int64_t last_part_dist = INT64_MAX ;
int64_t last_part_rd = INT64_MAX ;
int none_rate = INT_MAX ;
int64_t none_dist = INT64_MAX ;
int64_t none_rd = INT64_MAX ;
int chosen_rate = INT_MAX ;
int64_t chosen_dist = INT64_MAX ;
int64_t chosen_rd = INT64_MAX ;
BLOCK_SIZE sub_subsize = BLOCK_4X4 ;
int splits_below = 0 ;
BLOCK_SIZE bs_type = mi_8x8 [ 0 ] -> mbmi . sb_type ;
int do_partition_search = 1 ;
PICK_MODE_CONTEXT * ctx = & pc_tree -> none ;
if ( mi_row >= cm -> mi_rows || mi_col >= cm -> mi_cols ) return ;
assert ( num_4x4_blocks_wide_lookup [ bsize ] == num_4x4_blocks_high_lookup [ bsize ] ) ;
partition = partition_lookup [ bsl ] [ bs_type ] ;
subsize = get_subsize ( bsize , partition ) ;
pc_tree -> partitioning = partition ;
save_context ( cpi , mi_row , mi_col , a , l , sa , sl , bsize ) ;
if ( bsize == BLOCK_16X16 && cpi -> oxcf . aq_mode ) {
set_offsets ( cpi , tile , mi_row , mi_col , bsize ) ;
x -> mb_energy = vp9_block_energy ( cpi , x , bsize ) ;
}
if ( do_partition_search && cpi -> sf . partition_search_type == SEARCH_PARTITION && cpi -> sf . adjust_partitioning_from_last_frame ) {
if ( partition == PARTITION_SPLIT && subsize > BLOCK_8X8 ) {
sub_subsize = get_subsize ( subsize , PARTITION_SPLIT ) ;
splits_below = 1 ;
for ( i = 0 ;
i < 4 ;
i ++ ) {
int jj = i >> 1 , ii = i & 0x01 ;
MODE_INFO * this_mi = mi_8x8 [ jj * bss * mis + ii * bss ] ;
if ( this_mi && this_mi -> mbmi . sb_type >= sub_subsize ) {
splits_below = 0 ;
}
}
}
if ( partition != PARTITION_NONE && ! splits_below && mi_row + ( mi_step >> 1 ) < cm -> mi_rows && mi_col + ( mi_step >> 1 ) < cm -> mi_cols ) {
pc_tree -> partitioning = PARTITION_NONE ;
rd_pick_sb_modes ( cpi , tile , mi_row , mi_col , & none_rate , & none_dist , bsize , ctx , INT64_MAX , 0 ) ;
pl = partition_plane_context ( xd , mi_row , mi_col , bsize ) ;
if ( none_rate < INT_MAX ) {
none_rate += cpi -> partition_cost [ pl ] [ PARTITION_NONE ] ;
none_rd = RDCOST ( x -> rdmult , x -> rddiv , none_rate , none_dist ) ;
}
restore_context ( cpi , mi_row , mi_col , a , l , sa , sl , bsize ) ;
mi_8x8 [ 0 ] -> mbmi . sb_type = bs_type ;
pc_tree -> partitioning = partition ;
}
}
switch ( partition ) {
case PARTITION_NONE : rd_pick_sb_modes ( cpi , tile , mi_row , mi_col , & last_part_rate , & last_part_dist , bsize , ctx , INT64_MAX , 0 ) ;
break ;
case PARTITION_HORZ : rd_pick_sb_modes ( cpi , tile , mi_row , mi_col , & last_part_rate , & last_part_dist , subsize , & pc_tree -> horizontal [ 0 ] , INT64_MAX , 0 ) ;
if ( last_part_rate != INT_MAX && bsize >= BLOCK_8X8 && mi_row + ( mi_step >> 1 ) < cm -> mi_rows ) {
int rt = 0 ;
int64_t dt = 0 ;
PICK_MODE_CONTEXT * ctx = & pc_tree -> horizontal [ 0 ] ;
update_state ( cpi , ctx , mi_row , mi_col , subsize , 0 ) ;
encode_superblock ( cpi , tp , 0 , mi_row , mi_col , subsize , ctx ) ;
rd_pick_sb_modes ( cpi , tile , mi_row + ( mi_step >> 1 ) , mi_col , & rt , & dt , subsize , & pc_tree -> horizontal [ 1 ] , INT64_MAX , 1 ) ;
if ( rt == INT_MAX || dt == INT64_MAX ) {
last_part_rate = INT_MAX ;
last_part_dist = INT64_MAX ;
break ;
}
last_part_rate += rt ;
last_part_dist += dt ;
}
break ;
case PARTITION_VERT : rd_pick_sb_modes ( cpi , tile , mi_row , mi_col , & last_part_rate , & last_part_dist , subsize , & pc_tree -> vertical [ 0 ] , INT64_MAX , 0 ) ;
if ( last_part_rate != INT_MAX && bsize >= BLOCK_8X8 && mi_col + ( mi_step >> 1 ) < cm -> mi_cols ) {
int rt = 0 ;
int64_t dt = 0 ;
PICK_MODE_CONTEXT * ctx = & pc_tree -> vertical [ 0 ] ;
update_state ( cpi , ctx , mi_row , mi_col , subsize , 0 ) ;
encode_superblock ( cpi , tp , 0 , mi_row , mi_col , subsize , ctx ) ;
rd_pick_sb_modes ( cpi , tile , mi_row , mi_col + ( mi_step >> 1 ) , & rt , & dt , subsize , & pc_tree -> vertical [ bsize > BLOCK_8X8 ] , INT64_MAX , 1 ) ;
if ( rt == INT_MAX || dt == INT64_MAX ) {
last_part_rate = INT_MAX ;
last_part_dist = INT64_MAX ;
break ;
}
last_part_rate += rt ;
last_part_dist += dt ;
}
break ;
case PARTITION_SPLIT : if ( bsize == BLOCK_8X8 ) {
rd_pick_sb_modes ( cpi , tile , mi_row , mi_col , & last_part_rate , & last_part_dist , subsize , pc_tree -> leaf_split [ 0 ] , INT64_MAX , 0 ) ;
break ;
}
last_part_rate = 0 ;
last_part_dist = 0 ;
for ( i = 0 ;
i < 4 ;
i ++ ) {
int x_idx = ( i & 1 ) * ( mi_step >> 1 ) ;
int y_idx = ( i >> 1 ) * ( mi_step >> 1 ) ;
int jj = i >> 1 , ii = i & 0x01 ;
int rt ;
int64_t dt ;
if ( ( mi_row + y_idx >= cm -> mi_rows ) || ( mi_col + x_idx >= cm -> mi_cols ) ) continue ;
rd_use_partition ( cpi , tile , mi_8x8 + jj * bss * mis + ii * bss , tp , mi_row + y_idx , mi_col + x_idx , subsize , & rt , & dt , i != 3 , pc_tree -> split [ i ] ) ;
if ( rt == INT_MAX || dt == INT64_MAX ) {
last_part_rate = INT_MAX ;
last_part_dist = INT64_MAX ;
break ;
}
last_part_rate += rt ;
last_part_dist += dt ;
}
break ;
default : assert ( 0 ) ;
break ;
}
pl = partition_plane_context ( xd , mi_row , mi_col , bsize ) ;
if ( last_part_rate < INT_MAX ) {
last_part_rate += cpi -> partition_cost [ pl ] [ partition ] ;
last_part_rd = RDCOST ( x -> rdmult , x -> rddiv , last_part_rate , last_part_dist ) ;
}
if ( do_partition_search && cpi -> sf . adjust_partitioning_from_last_frame && cpi -> sf . partition_search_type == SEARCH_PARTITION && partition != PARTITION_SPLIT && bsize > BLOCK_8X8 && ( mi_row + mi_step < cm -> mi_rows || mi_row + ( mi_step >> 1 ) == cm -> mi_rows ) && ( mi_col + mi_step < cm -> mi_cols || mi_col + ( mi_step >> 1 ) == cm -> mi_cols ) ) {
BLOCK_SIZE split_subsize = get_subsize ( bsize , PARTITION_SPLIT ) ;
chosen_rate = 0 ;
chosen_dist = 0 ;
restore_context ( cpi , mi_row , mi_col , a , l , sa , sl , bsize ) ;
pc_tree -> partitioning = PARTITION_SPLIT ;
for ( i = 0 ;
i < 4 ;
i ++ ) {
int x_idx = ( i & 1 ) * ( mi_step >> 1 ) ;
int y_idx = ( i >> 1 ) * ( mi_step >> 1 ) ;
int rt = 0 ;
int64_t dt = 0 ;
ENTROPY_CONTEXT l [ 16 * MAX_MB_PLANE ] , a [ 16 * MAX_MB_PLANE ] ;
PARTITION_CONTEXT sl [ 8 ] , sa [ 8 ] ;
if ( ( mi_row + y_idx >= cm -> mi_rows ) || ( mi_col + x_idx >= cm -> mi_cols ) ) continue ;
save_context ( cpi , mi_row , mi_col , a , l , sa , sl , bsize ) ;
pc_tree -> split [ i ] -> partitioning = PARTITION_NONE ;
rd_pick_sb_modes ( cpi , tile , mi_row + y_idx , mi_col + x_idx , & rt , & dt , split_subsize , & pc_tree -> split [ i ] -> none , INT64_MAX , i ) ;
restore_context ( cpi , mi_row , mi_col , a , l , sa , sl , bsize ) ;
if ( rt == INT_MAX || dt == INT64_MAX ) {
chosen_rate = INT_MAX ;
chosen_dist = INT64_MAX ;
break ;
}
chosen_rate += rt ;
chosen_dist += dt ;
if ( i != 3 ) encode_sb ( cpi , tile , tp , mi_row + y_idx , mi_col + x_idx , 0 , split_subsize , pc_tree -> split [ i ] ) ;
pl = partition_plane_context ( xd , mi_row + y_idx , mi_col + x_idx , split_subsize ) ;
chosen_rate += cpi -> partition_cost [ pl ] [ PARTITION_NONE ] ;
}
pl = partition_plane_context ( xd , mi_row , mi_col , bsize ) ;
if ( chosen_rate < INT_MAX ) {
chosen_rate += cpi -> partition_cost [ pl ] [ PARTITION_SPLIT ] ;
chosen_rd = RDCOST ( x -> rdmult , x -> rddiv , chosen_rate , chosen_dist ) ;
}
}
if ( last_part_rd < chosen_rd ) {
mi_8x8 [ 0 ] -> mbmi . sb_type = bsize ;
if ( bsize >= BLOCK_8X8 ) pc_tree -> partitioning = partition ;
chosen_rate = last_part_rate ;
chosen_dist = last_part_dist ;
chosen_rd = last_part_rd ;
}
if ( none_rd < chosen_rd ) {
if ( bsize >= BLOCK_8X8 ) pc_tree -> partitioning = PARTITION_NONE ;
chosen_rate = none_rate ;
chosen_dist = none_dist ;
}
restore_context ( cpi , mi_row , mi_col , a , l , sa , sl , bsize ) ;
if ( bsize == BLOCK_64X64 ) assert ( chosen_rate < INT_MAX && chosen_dist < INT64_MAX ) ;
if ( do_recon ) {
int output_enabled = ( bsize == BLOCK_64X64 ) ;
if ( ( cpi -> oxcf . aq_mode == COMPLEXITY_AQ ) && cm -> seg . update_map ) {
vp9_select_in_frame_q_segment ( cpi , mi_row , mi_col , output_enabled , chosen_rate ) ;
}
if ( cpi -> oxcf . aq_mode == CYCLIC_REFRESH_AQ ) vp9_cyclic_refresh_set_rate_and_dist_sb ( cpi -> cyclic_refresh , chosen_rate , chosen_dist ) ;
encode_sb ( cpi , tile , tp , mi_row , mi_col , output_enabled , bsize , pc_tree ) ;
}
* rate = chosen_rate ;
* dist = chosen_dist ;
} | 1,197 |
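Row 1,197 repeatedly forms RDCOST(x->rdmult, x->rddiv, rate, dist) for the none, horizontal, vertical and split candidates and keeps the cheapest. The macro's exact fixed-point scaling lives in the vp9 headers; conceptually it is a Lagrangian cost J = lambda * rate + dist, and the selection reduces to a sketch like this (plain doubles, purely illustrative, not the encoder's arithmetic):

    #include <float.h>

    struct candidate { double rate, dist; };

    /* Illustrative rate-distortion selection over candidate partitions. */
    static int pick_cheapest(const struct candidate *c, int n, double lambda)
    {
        int best = -1;
        double best_cost = DBL_MAX;
        for (int i = 0; i < n; i++) {
            double cost = lambda * c[i].rate + c[i].dist;
            if (cost < best_cost) {
                best_cost = cost;
                best = i;
            }
        }
        return best;
    }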
0 | void AllocateDataSet(cmsIT8* it8)
{
TABLE* t = GetTable(it8);
if (t -> Data) return; // Already allocated
t-> nSamples = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_FIELDS"));
t-> nPatches = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_SETS"));
if (t -> nSamples < 0 || t->nSamples > 0x7ffe || t->nPatches < 0 || t->nPatches > 0x7ffe)
{
SynError(it8, "AllocateDataSet: too much data");
}
else {
t->Data = (char**)AllocChunk(it8, ((cmsUInt32Number)t->nSamples + 1) * ((cmsUInt32Number)t->nPatches + 1) * sizeof(char*));
if (t->Data == NULL) {
SynError(it8, "AllocateDataSet: Unable to allocate data array");
}
}
} | void AllocateDataSet(cmsIT8* it8)
{
TABLE* t = GetTable(it8);
if (t -> Data) return;
t-> nSamples = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_FIELDS"));
t-> nPatches = atoi(cmsIT8GetProperty(it8, "NUMBER_OF_SETS"));
if (t -> nSamples < 0 || t->nSamples > 0x7ffe || t->nPatches < 0 || t->nPatches > 0x7ffe)
{
SynError(it8, "AllocateDataSet: too much data");
}
else {
t->Data = (char**)AllocChunk(it8, ((cmsUInt32Number)t->nSamples + 1) * ((cmsUInt32Number)t->nPatches + 1) * sizeof(char*));
if (t->Data == NULL) {
SynError(it8, "AllocateDataSet: Unable to allocate data array");
}
}
} | 1,201 |
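The 0x7ffe ceiling in row 1,201 rejects hostile NUMBER_OF_FIELDS and NUMBER_OF_SETS values before the (nSamples + 1) * (nPatches + 1) * sizeof(char*) allocation size is formed. A generic checked version of the same size computation, for contexts without such a domain ceiling (checked_table_bytes is a hypothetical helper):

    #include <stdint.h>
    #include <stddef.h>

    /* Checked size computation: returns 0 on overflow instead of wrapping. */
    static size_t checked_table_bytes(size_t rows, size_t cols, size_t elem)
    {
        if (rows == 0 || cols == 0 || elem == 0)
            return 0;
        if (rows > SIZE_MAX / cols)
            return 0;
        if (rows * cols > SIZE_MAX / elem)
            return 0;
        return rows * cols * elem;
    }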
0 | static void qemuMonitorJSONHandleVNCConnect ( qemuMonitorPtr mon , virJSONValuePtr data ) {
qemuMonitorJSONHandleVNC ( mon , data , VIR_DOMAIN_EVENT_GRAPHICS_CONNECT ) ;
} | static void qemuMonitorJSONHandleVNCConnect ( qemuMonitorPtr mon , virJSONValuePtr data ) {
qemuMonitorJSONHandleVNC ( mon , data , VIR_DOMAIN_EVENT_GRAPHICS_CONNECT ) ;
} | 1,202 |
1 | _AFmoduleinst _af_ima_adpcm_init_decompress (_Track *track, AFvirtualfile *fh,
bool seekok, bool headerless, AFframecount *chunkframes)
{
_AFmoduleinst ret = _AFnewmodinst(&ima_adpcm_decompress);
ima_adpcm_data *d;
AUpvlist pv;
long l;
assert(af_ftell(fh) == track->fpos_first_frame);
d = (ima_adpcm_data *) _af_malloc(sizeof (ima_adpcm_data));
d->track = track;
d->fh = fh;
d->track->frames2ignore = 0;
d->track->fpos_next_frame = d->track->fpos_first_frame;
pv = d->track->f.compressionParams;
if (_af_pv_getlong(pv, _AF_SAMPLES_PER_BLOCK, &l))
d->samplesPerBlock = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "samples per block not set");
if (_af_pv_getlong(pv, _AF_BLOCK_SIZE, &l))
d->blockAlign = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "block size not set");
*chunkframes = d->samplesPerBlock / d->track->f.channelCount;
ret.modspec = d;
return ret;
} | _AFmoduleinst _af_ima_adpcm_init_decompress (_Track *track, AFvirtualfile *fh,
bool seekok, bool headerless, AFframecount *chunkframes)
{
_AFmoduleinst ret = _AFnewmodinst(&ima_adpcm_decompress);
ima_adpcm_data *d;
AUpvlist pv;
long l;
assert(af_ftell(fh) == track->fpos_first_frame);
d = (ima_adpcm_data *) _af_malloc(sizeof (ima_adpcm_data));
d->track = track;
d->fh = fh;
d->track->frames2ignore = 0;
d->track->fpos_next_frame = d->track->fpos_first_frame;
pv = d->track->f.compressionParams;
if (_af_pv_getlong(pv, _AF_SAMPLES_PER_BLOCK, &l))
d->samplesPerBlock = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "samples per block not set");
if (_af_pv_getlong(pv, _AF_BLOCK_SIZE, &l))
d->blockAlign = l;
else
_af_error(AF_BAD_CODEC_CONFIG, "block size not set");
*chunkframes = d->samplesPerBlock / d->track->f.channelCount;
ret.modspec = d;
return ret;
} | 1,204 |
1 | static void arm_gic_common_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->reset = arm_gic_common_reset;
    dc->realize = arm_gic_common_realize;
    dc->props = arm_gic_common_properties;
    dc->vmsd = &vmstate_gic;
    dc->no_user = 1;
} | static void arm_gic_common_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->reset = arm_gic_common_reset;
    dc->realize = arm_gic_common_realize;
    dc->props = arm_gic_common_properties;
    dc->vmsd = &vmstate_gic;
    dc->no_user = 1;
} | 1,206
1 | static void ima_adpcm_reset1 (_AFmoduleinst *i)
{
ima_adpcm_data *d = (ima_adpcm_data *) i->modspec;
AFframecount nextTrackFrame;
int framesPerBlock;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
nextTrackFrame = d->track->nextfframe;
d->track->nextfframe = (nextTrackFrame / framesPerBlock) *
framesPerBlock;
d->framesToIgnore = nextTrackFrame - d->track->nextfframe;
/* postroll = frames2ignore */
} | static void ima_adpcm_reset1 (_AFmoduleinst *i)
{
ima_adpcm_data *d = (ima_adpcm_data *) i->modspec;
AFframecount nextTrackFrame;
int framesPerBlock;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
nextTrackFrame = d->track->nextfframe;
d->track->nextfframe = (nextTrackFrame / framesPerBlock) *
framesPerBlock;
d->framesToIgnore = nextTrackFrame - d->track->nextfframe;
} | 1,207 |
1 | cmsNAMEDCOLORLIST* CMSEXPORT cmsAllocNamedColorList(cmsContext ContextID, cmsUInt32Number n, cmsUInt32Number ColorantCount, const char* Prefix, const char* Suffix)
{
cmsNAMEDCOLORLIST* v = (cmsNAMEDCOLORLIST*) _cmsMallocZero(ContextID, sizeof(cmsNAMEDCOLORLIST));
if (v == NULL) return NULL;
v ->List = NULL;
v ->nColors = 0;
v ->ContextID = ContextID;
while (v -> Allocated < n)
GrowNamedColorList(v);
strncpy(v ->Prefix, Prefix, sizeof(v ->Prefix));
strncpy(v ->Suffix, Suffix, sizeof(v ->Suffix));
v->Prefix[32] = v->Suffix[32] = 0;
v -> ColorantCount = ColorantCount;
return v;
} | cmsNAMEDCOLORLIST* CMSEXPORT cmsAllocNamedColorList(cmsContext ContextID, cmsUInt32Number n, cmsUInt32Number ColorantCount, const char* Prefix, const char* Suffix)
{
cmsNAMEDCOLORLIST* v = (cmsNAMEDCOLORLIST*) _cmsMallocZero(ContextID, sizeof(cmsNAMEDCOLORLIST));
if (v == NULL) return NULL;
v ->List = NULL;
v ->nColors = 0;
v ->ContextID = ContextID;
while (v -> Allocated < n)
GrowNamedColorList(v);
strncpy(v ->Prefix, Prefix, sizeof(v ->Prefix));
strncpy(v ->Suffix, Suffix, sizeof(v ->Suffix));
v->Prefix[32] = v->Suffix[32] = 0;
v -> ColorantCount = ColorantCount;
return v;
} | 1,208 |
0 | err_status_t srtp_shutdown() {
    err_status_t status;
    status = crypto_kernel_shutdown();
    if (status) return status;
    return err_status_ok;
} | err_status_t srtp_shutdown() {
    err_status_t status;
    status = crypto_kernel_shutdown();
    if (status) return status;
    return err_status_ok;
} | 1,209
0 | cmsNAMEDCOLORLIST* CMSEXPORT cmsAllocNamedColorList(cmsContext ContextID, cmsUInt32Number n, cmsUInt32Number ColorantCount, const char* Prefix, const char* Suffix)
{
cmsNAMEDCOLORLIST* v = (cmsNAMEDCOLORLIST*) _cmsMallocZero(ContextID, sizeof(cmsNAMEDCOLORLIST));
if (v == NULL) return NULL;
v ->List = NULL;
v ->nColors = 0;
v ->ContextID = ContextID;
while (v -> Allocated < n)
GrowNamedColorList(v);
strncpy(v ->Prefix, Prefix, sizeof(v ->Prefix)-1);
strncpy(v ->Suffix, Suffix, sizeof(v ->Suffix)-1);
v->Prefix[32] = v->Suffix[32] = 0;
v -> ColorantCount = ColorantCount;
return v;
} | cmsNAMEDCOLORLIST* CMSEXPORT cmsAllocNamedColorList(cmsContext ContextID, cmsUInt32Number n, cmsUInt32Number ColorantCount, const char* Prefix, const char* Suffix)
{
cmsNAMEDCOLORLIST* v = (cmsNAMEDCOLORLIST*) _cmsMallocZero(ContextID, sizeof(cmsNAMEDCOLORLIST));
if (v == NULL) return NULL;
v ->List = NULL;
v ->nColors = 0;
v ->ContextID = ContextID;
while (v -> Allocated < n)
GrowNamedColorList(v);
strncpy(v ->Prefix, Prefix, sizeof(v ->Prefix)-1);
strncpy(v ->Suffix, Suffix, sizeof(v ->Suffix)-1);
v->Prefix[32] = v->Suffix[32] = 0;
v -> ColorantCount = ColorantCount;
return v;
} | 1,210 |
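Rows 1,208 and 1,210 above are the same cmsAllocNamedColorList except for one argument: strncpy with sizeof(dst) versus sizeof(dst) - 1. Both variants force a terminator at index 32 afterwards, but the -1 idiom is the general defense: strncpy given the full buffer size leaves no NUL at all when the source fills the buffer. A self-contained illustration with hypothetical buffer sizes (not the lcms structs):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char a[8], b[8];
    const char *src = "0123456789";     /* longer than either buffer */
    strncpy(a, src, sizeof(a));         /* a is NOT NUL-terminated */
    (void)a;                            /* passing a to %s here would over-read */
    strncpy(b, src, sizeof(b) - 1);
    b[sizeof(b) - 1] = '\0';            /* b is always terminated */
    printf("b = \"%s\"\n", b);          /* prints "0123456" */
    return 0;
}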
1 | static CharDriverState *qemu_chr_open_tty(QemuOpts *opts) { const char *filename = qemu_opt_get(opts, "path"); CharDriverState *chr; int fd; TFR(fd = open(filename, O_RDWR | O_NONBLOCK)); if (fd < 0) { return NULL; } tty_serial_init(fd, 115200, 'N', 8, 1); chr = qemu_chr_open_fd(fd, fd); if (!chr) { close(fd); return NULL; } chr->chr_ioctl = tty_serial_ioctl; chr->chr_close = qemu_chr_close_tty; return chr; } | static CharDriverState *qemu_chr_open_tty(QemuOpts *opts) { const char *filename = qemu_opt_get(opts, "path"); CharDriverState *chr; int fd; TFR(fd = open(filename, O_RDWR | O_NONBLOCK)); if (fd < 0) { return NULL; } tty_serial_init(fd, 115200, 'N', 8, 1); chr = qemu_chr_open_fd(fd, fd); if (!chr) { close(fd); return NULL; } chr->chr_ioctl = tty_serial_ioctl; chr->chr_close = qemu_chr_close_tty; return chr; } | 1,211 |
1 | static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
struct futex_hash_bucket *hb;
get_futex_key_refs(&q->key);
hb = hash_futex(&q->key);
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock);
return hb;
} | static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
struct futex_hash_bucket *hb;
get_futex_key_refs(&q->key);
hb = hash_futex(&q->key);
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock);
return hb;
} | 1,212 |
1 | void _af_adpcm_coder (int16_t *indata, uint8_t *outdata, int len,
struct adpcm_state *state)
{
int16_t *inp; /* Input buffer pointer */
uint8_t *outp; /* Output buffer pointer */
int val; /* Current input sample value */
int sign; /* Current adpcm sign bit */
int delta; /* Current adpcm output value */
int diff; /* Difference between val and valprev */
int step; /* Stepsize */
int valpred; /* Predicted output value */
int vpdiff; /* Current change to valpred */
int index; /* Current step change index */
int outputbuffer; /* place to keep previous 4-bit value */
int bufferstep; /* toggle between outputbuffer/output */
outp = outdata;
inp = indata;
valpred = state->valprev;
index = state->index;
step = stepsizeTable[index];
bufferstep = 1;
for ( ; len > 0 ; len-- ) {
val = *inp++;
/* Step 1 - compute difference with previous value */
diff = val - valpred;
sign = (diff < 0) ? 8 : 0;
if ( sign ) diff = (-diff);
/* Step 2 - Divide and clamp */
/* Note:
** This code *approximately* computes:
** delta = diff*4/step;
** vpdiff = (delta+0.5)*step/4;
** but in shift step bits are dropped. The net result of this is
** that even if you have fast mul/div hardware you cannot put it to
** good use since the fixup would be too expensive.
*/
delta = 0;
vpdiff = (step >> 3);
if ( diff >= step ) {
delta = 4;
diff -= step;
vpdiff += step;
}
step >>= 1;
if ( diff >= step ) {
delta |= 2;
diff -= step;
vpdiff += step;
}
step >>= 1;
if ( diff >= step ) {
delta |= 1;
vpdiff += step;
}
/* Step 3 - Update previous value */
if ( sign )
valpred -= vpdiff;
else
valpred += vpdiff;
/* Step 4 - Clamp previous value to 16 bits */
if ( valpred > 32767 )
valpred = 32767;
else if ( valpred < -32768 )
valpred = -32768;
/* Step 5 - Assemble value, update index and step values */
delta |= sign;
index += indexTable[delta];
if ( index < 0 ) index = 0;
if ( index > 88 ) index = 88;
step = stepsizeTable[index];
/* Step 6 - Output value */
if ( bufferstep ) {
outputbuffer = delta & 0x0f;
} else {
*outp++ = ((delta << 4) & 0xf0) | outputbuffer;
}
bufferstep = !bufferstep;
}
/* Output last step, if needed */
if ( !bufferstep )
*outp++ = outputbuffer;
state->valprev = valpred;
state->index = index;
} | void _af_adpcm_coder (int16_t *indata, uint8_t *outdata, int len,
struct adpcm_state *state)
{
int16_t *inp;
uint8_t *outp;
int val;
int sign;
int delta;
int diff;
int step;
int valpred;
int vpdiff;
int index;
int outputbuffer;
int bufferstep;
outp = outdata;
inp = indata;
valpred = state->valprev;
index = state->index;
step = stepsizeTable[index];
bufferstep = 1;
for ( ; len > 0 ; len-- ) {
val = *inp++;
diff = val - valpred;
sign = (diff < 0) ? 8 : 0;
if ( sign ) diff = (-diff);
delta = 0;
vpdiff = (step >> 3);
if ( diff >= step ) {
delta = 4;
diff -= step;
vpdiff += step;
}
step >>= 1;
if ( diff >= step ) {
delta |= 2;
diff -= step;
vpdiff += step;
}
step >>= 1;
if ( diff >= step ) {
delta |= 1;
vpdiff += step;
}
if ( sign )
valpred -= vpdiff;
else
valpred += vpdiff;
if ( valpred > 32767 )
valpred = 32767;
else if ( valpred < -32768 )
valpred = -32768;
delta |= sign;
index += indexTable[delta];
if ( index < 0 ) index = 0;
if ( index > 88 ) index = 88;
step = stepsizeTable[index];
if ( bufferstep ) {
outputbuffer = delta & 0x0f;
} else {
*outp++ = ((delta << 4) & 0xf0) | outputbuffer;
}
bufferstep = !bufferstep;
}
if ( !bufferstep )
*outp++ = outputbuffer;
state->valprev = valpred;
state->index = index;
} | 1,213 |
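To see the quantizer of the coder in row 1,213 in action, trace one sample by hand: with step = 16 and diff = 25, the bit tests emit delta = 6 and the running vpdiff comes out to 26, matching the (delta + 0.5) * step / 4 approximation described in the comments (6.5 * 16 / 4 = 26). The same trace as a standalone program (values invented for the example):

#include <stdio.h>

int main(void)
{
    int diff = 25, step = 16;
    int delta = 0, vpdiff = step >> 3;                              /* vpdiff = 2 */
    if (diff >= step) { delta = 4; diff -= step; vpdiff += step; }  /* diff = 9, vpdiff = 18 */
    step >>= 1;                                                     /* step = 8 */
    if (diff >= step) { delta |= 2; diff -= step; vpdiff += step; } /* diff = 1, vpdiff = 26 */
    step >>= 1;                                                     /* step = 4 */
    if (diff >= step) { delta |= 1; vpdiff += step; }               /* not taken */
    printf("delta = %d, vpdiff = %d\n", delta, vpdiff);             /* delta = 6, vpdiff = 26 */
    return 0;
}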
1 | void object_property_set_qobject(Object *obj, QObject *value, const char *name, Error **errp) { Visitor *v; /* TODO: Should we reject, rather than ignore, excess input? */ v = qobject_input_visitor_new(value, false); object_property_set(obj, v, name, errp); visit_free(v); } | void object_property_set_qobject(Object *obj, QObject *value, const char *name, Error **errp) { Visitor *v; v = qobject_input_visitor_new(value, false); object_property_set(obj, v, name, errp); visit_free(v); } | 1,215 |
1 | queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
spin_unlock(&hb->lock);
drop_futex_key_refs(&q->key);
} | queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
spin_unlock(&hb->lock);
drop_futex_key_refs(&q->key);
} | 1,216 |
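The futex pair above (rows 1,212 and 1,216) is careful about ordering: queue_lock takes a reference on the key before acquiring the hash-bucket spinlock, and queue_unlock releases the lock before dropping the reference, so the bucket cannot go away while it is held. A userspace miniature of the same discipline, with invented names (this is not kernel code):

#include <pthread.h>
#include <stdatomic.h>

struct bucket {
    atomic_int refs;
    pthread_mutex_t lock;
};

static void bucket_lock(struct bucket *b)
{
    atomic_fetch_add(&b->refs, 1);    /* pin the bucket first */
    pthread_mutex_lock(&b->lock);
}

static void bucket_unlock(struct bucket *b)
{
    pthread_mutex_unlock(&b->lock);
    atomic_fetch_sub(&b->refs, 1);    /* unpin only after unlocking */
}

int main(void)
{
    struct bucket b = { 0 };
    pthread_mutex_init(&b.lock, NULL);
    bucket_lock(&b);
    bucket_unlock(&b);
    return 0;
}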
0 | static void cirrus_init_common(CirrusVGAState *s, Object *owner, int device_id, int is_pci, MemoryRegion *system_memory, MemoryRegion *system_io) {
    int i;
    static int inited;
    if (!inited) {
        inited = 1;
        for (i = 0; i < 256; i++)
            rop_to_index[i] = CIRRUS_ROP_NOP_INDEX;
        rop_to_index[CIRRUS_ROP_0] = 0;
        rop_to_index[CIRRUS_ROP_SRC_AND_DST] = 1;
        rop_to_index[CIRRUS_ROP_NOP] = 2;
        rop_to_index[CIRRUS_ROP_SRC_AND_NOTDST] = 3;
        rop_to_index[CIRRUS_ROP_NOTDST] = 4;
        rop_to_index[CIRRUS_ROP_SRC] = 5;
        rop_to_index[CIRRUS_ROP_1] = 6;
        rop_to_index[CIRRUS_ROP_NOTSRC_AND_DST] = 7;
        rop_to_index[CIRRUS_ROP_SRC_XOR_DST] = 8;
        rop_to_index[CIRRUS_ROP_SRC_OR_DST] = 9;
        rop_to_index[CIRRUS_ROP_NOTSRC_OR_NOTDST] = 10;
        rop_to_index[CIRRUS_ROP_SRC_NOTXOR_DST] = 11;
        rop_to_index[CIRRUS_ROP_SRC_OR_NOTDST] = 12;
        rop_to_index[CIRRUS_ROP_NOTSRC] = 13;
        rop_to_index[CIRRUS_ROP_NOTSRC_OR_DST] = 14;
        rop_to_index[CIRRUS_ROP_NOTSRC_AND_NOTDST] = 15;
        s->device_id = device_id;
        if (is_pci)
            s->bustype = CIRRUS_BUSTYPE_PCI;
        else
            s->bustype = CIRRUS_BUSTYPE_ISA;
    }
    memory_region_init_io(&s->cirrus_vga_io, owner, &cirrus_vga_io_ops, s, "cirrus-io", 0x30);
    memory_region_set_flush_coalesced(&s->cirrus_vga_io);
    memory_region_add_subregion(system_io, 0x3b0, &s->cirrus_vga_io);
    memory_region_init(&s->low_mem_container, owner, "cirrus-lowmem-container", 0x20000);
    memory_region_init_io(&s->low_mem, owner, &cirrus_vga_mem_ops, s, "cirrus-low-memory", 0x20000);
    memory_region_add_subregion(&s->low_mem_container, 0, &s->low_mem);
    for (i = 0; i < 2; ++i) {
        static const char *names[] = { "vga.bank0", "vga.bank1" };
        MemoryRegion *bank = &s->cirrus_bank[i];
        memory_region_init_alias(bank, owner, names[i], &s->vga.vram, 0, 0x8000);
        memory_region_set_enabled(bank, false);
        memory_region_add_subregion_overlap(&s->low_mem_container, i * 0x8000, bank, 1);
    }
    memory_region_add_subregion_overlap(system_memory, 0x000a0000, &s->low_mem_container, 1);
    memory_region_set_coalescing(&s->low_mem);
    memory_region_init_io(&s->cirrus_linear_io, owner, &cirrus_linear_io_ops, s, "cirrus-linear-io", s->vga.vram_size_mb * 1024 * 1024);
    memory_region_set_flush_coalesced(&s->cirrus_linear_io);
    memory_region_init_io(&s->cirrus_linear_bitblt_io, owner, &cirrus_linear_bitblt_io_ops, s, "cirrus-bitblt-mmio", 0x400000);
    memory_region_set_flush_coalesced(&s->cirrus_linear_bitblt_io);
    memory_region_init_io(&s->cirrus_mmio_io, owner, &cirrus_mmio_io_ops, s, "cirrus-mmio", CIRRUS_PNPMMIO_SIZE);
    memory_region_set_flush_coalesced(&s->cirrus_mmio_io);
    s->real_vram_size = (s->device_id == CIRRUS_ID_CLGD5446) ? 4096 * 1024 : 2048 * 1024;
    s->cirrus_addr_mask = s->real_vram_size - 1;
    s->linear_mmio_mask = s->real_vram_size - 256;
    s->vga.get_bpp = cirrus_get_bpp;
    s->vga.get_offsets = cirrus_get_offsets;
    s->vga.get_resolution = cirrus_get_resolution;
    s->vga.cursor_invalidate = cirrus_cursor_invalidate;
    s->vga.cursor_draw_line = cirrus_cursor_draw_line;
    qemu_register_reset(cirrus_reset, s);
} | static void cirrus_init_common(CirrusVGAState *s, Object *owner, int device_id, int is_pci, MemoryRegion *system_memory, MemoryRegion *system_io) {
    int i;
    static int inited;
    if (!inited) {
        inited = 1;
        for (i = 0; i < 256; i++)
            rop_to_index[i] = CIRRUS_ROP_NOP_INDEX;
        rop_to_index[CIRRUS_ROP_0] = 0;
        rop_to_index[CIRRUS_ROP_SRC_AND_DST] = 1;
        rop_to_index[CIRRUS_ROP_NOP] = 2;
        rop_to_index[CIRRUS_ROP_SRC_AND_NOTDST] = 3;
        rop_to_index[CIRRUS_ROP_NOTDST] = 4;
        rop_to_index[CIRRUS_ROP_SRC] = 5;
        rop_to_index[CIRRUS_ROP_1] = 6;
        rop_to_index[CIRRUS_ROP_NOTSRC_AND_DST] = 7;
        rop_to_index[CIRRUS_ROP_SRC_XOR_DST] = 8;
        rop_to_index[CIRRUS_ROP_SRC_OR_DST] = 9;
        rop_to_index[CIRRUS_ROP_NOTSRC_OR_NOTDST] = 10;
        rop_to_index[CIRRUS_ROP_SRC_NOTXOR_DST] = 11;
        rop_to_index[CIRRUS_ROP_SRC_OR_NOTDST] = 12;
        rop_to_index[CIRRUS_ROP_NOTSRC] = 13;
        rop_to_index[CIRRUS_ROP_NOTSRC_OR_DST] = 14;
        rop_to_index[CIRRUS_ROP_NOTSRC_AND_NOTDST] = 15;
        s->device_id = device_id;
        if (is_pci)
            s->bustype = CIRRUS_BUSTYPE_PCI;
        else
            s->bustype = CIRRUS_BUSTYPE_ISA;
    }
    memory_region_init_io(&s->cirrus_vga_io, owner, &cirrus_vga_io_ops, s, "cirrus-io", 0x30);
    memory_region_set_flush_coalesced(&s->cirrus_vga_io);
    memory_region_add_subregion(system_io, 0x3b0, &s->cirrus_vga_io);
    memory_region_init(&s->low_mem_container, owner, "cirrus-lowmem-container", 0x20000);
    memory_region_init_io(&s->low_mem, owner, &cirrus_vga_mem_ops, s, "cirrus-low-memory", 0x20000);
    memory_region_add_subregion(&s->low_mem_container, 0, &s->low_mem);
    for (i = 0; i < 2; ++i) {
        static const char *names[] = { "vga.bank0", "vga.bank1" };
        MemoryRegion *bank = &s->cirrus_bank[i];
        memory_region_init_alias(bank, owner, names[i], &s->vga.vram, 0, 0x8000);
        memory_region_set_enabled(bank, false);
        memory_region_add_subregion_overlap(&s->low_mem_container, i * 0x8000, bank, 1);
    }
    memory_region_add_subregion_overlap(system_memory, 0x000a0000, &s->low_mem_container, 1);
    memory_region_set_coalescing(&s->low_mem);
    memory_region_init_io(&s->cirrus_linear_io, owner, &cirrus_linear_io_ops, s, "cirrus-linear-io", s->vga.vram_size_mb * 1024 * 1024);
    memory_region_set_flush_coalesced(&s->cirrus_linear_io);
    memory_region_init_io(&s->cirrus_linear_bitblt_io, owner, &cirrus_linear_bitblt_io_ops, s, "cirrus-bitblt-mmio", 0x400000);
    memory_region_set_flush_coalesced(&s->cirrus_linear_bitblt_io);
    memory_region_init_io(&s->cirrus_mmio_io, owner, &cirrus_mmio_io_ops, s, "cirrus-mmio", CIRRUS_PNPMMIO_SIZE);
    memory_region_set_flush_coalesced(&s->cirrus_mmio_io);
    s->real_vram_size = (s->device_id == CIRRUS_ID_CLGD5446) ? 4096 * 1024 : 2048 * 1024;
    s->cirrus_addr_mask = s->real_vram_size - 1;
    s->linear_mmio_mask = s->real_vram_size - 256;
    s->vga.get_bpp = cirrus_get_bpp;
    s->vga.get_offsets = cirrus_get_offsets;
    s->vga.get_resolution = cirrus_get_resolution;
    s->vga.cursor_invalidate = cirrus_cursor_invalidate;
    s->vga.cursor_draw_line = cirrus_cursor_draw_line;
    qemu_register_reset(cirrus_reset, s);
} | 1,217
1 | cmsBool CMSEXPORT cmsAppendNamedColor(cmsNAMEDCOLORLIST* NamedColorList,
const char* Name,
cmsUInt16Number PCS[3], cmsUInt16Number Colorant[cmsMAXCHANNELS])
{
cmsUInt32Number i;
if (NamedColorList == NULL) return FALSE;
if (NamedColorList ->nColors + 1 > NamedColorList ->Allocated) {
if (!GrowNamedColorList(NamedColorList)) return FALSE;
}
for (i=0; i < NamedColorList ->ColorantCount; i++)
NamedColorList ->List[NamedColorList ->nColors].DeviceColorant[i] = Colorant == NULL? 0 : Colorant[i];
for (i=0; i < 3; i++)
NamedColorList ->List[NamedColorList ->nColors].PCS[i] = PCS == NULL ? 0 : PCS[i];
if (Name != NULL) {
strncpy(NamedColorList ->List[NamedColorList ->nColors].Name, Name,
sizeof(NamedColorList ->List[NamedColorList ->nColors].Name));
NamedColorList ->List[NamedColorList ->nColors].Name[cmsMAX_PATH-1] = 0;
}
else
NamedColorList ->List[NamedColorList ->nColors].Name[0] = 0;
NamedColorList ->nColors++;
return TRUE;
} | cmsBool CMSEXPORT cmsAppendNamedColor(cmsNAMEDCOLORLIST* NamedColorList,
const char* Name,
cmsUInt16Number PCS[3], cmsUInt16Number Colorant[cmsMAXCHANNELS])
{
cmsUInt32Number i;
if (NamedColorList == NULL) return FALSE;
if (NamedColorList ->nColors + 1 > NamedColorList ->Allocated) {
if (!GrowNamedColorList(NamedColorList)) return FALSE;
}
for (i=0; i < NamedColorList ->ColorantCount; i++)
NamedColorList ->List[NamedColorList ->nColors].DeviceColorant[i] = Colorant == NULL? 0 : Colorant[i];
for (i=0; i < 3; i++)
NamedColorList ->List[NamedColorList ->nColors].PCS[i] = PCS == NULL ? 0 : PCS[i];
if (Name != NULL) {
strncpy(NamedColorList ->List[NamedColorList ->nColors].Name, Name,
sizeof(NamedColorList ->List[NamedColorList ->nColors].Name));
NamedColorList ->List[NamedColorList ->nColors].Name[cmsMAX_PATH-1] = 0;
}
else
NamedColorList ->List[NamedColorList ->nColors].Name[0] = 0;
NamedColorList ->nColors++;
return TRUE;
} | 1,219 |
1 | static int http_receive_data(HTTPContext *c) { int len; HTTPContext *c1; if (c->buffer_ptr >= c->buffer_end) { FFStream *feed = c->stream; /* a packet has been received : write it in the store, except if header */ if (c->data_count > FFM_PACKET_SIZE) { // printf("writing pos=0x%Lx size=0x%Lx\n", feed->feed_write_index, feed->feed_size); /* XXX: use llseek or url_seek */ lseek(c->feed_fd, feed->feed_write_index, SEEK_SET); write(c->feed_fd, c->buffer, FFM_PACKET_SIZE); feed->feed_write_index += FFM_PACKET_SIZE; /* update file size */ if (feed->feed_write_index > c->stream->feed_size) feed->feed_size = feed->feed_write_index; /* handle wrap around if max file size reached */ if (feed->feed_write_index >= c->stream->feed_max_size) feed->feed_write_index = FFM_PACKET_SIZE; /* write index */ ffm_write_write_index(c->feed_fd, feed->feed_write_index); /* wake up any waiting connections */ for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) { if (c1->state == HTTPSTATE_WAIT_FEED && c1->stream->feed == c->stream->feed) { c1->state = HTTPSTATE_SEND_DATA; } } } else { /* We have a header in our hands that contains useful data */ AVFormatContext s; ByteIOContext *pb = &s.pb; int i; memset(&s, 0, sizeof(s)); url_open_buf(pb, c->buffer, c->buffer_end - c->buffer, URL_RDONLY); pb->buf_end = c->buffer_end; /* ?? */ pb->is_streamed = 1; if (feed->fmt->read_header(&s, 0) < 0) { goto fail; } /* Now we have the actual streams */ if (s.nb_streams != feed->nb_streams) { goto fail; } for (i = 0; i < s.nb_streams; i++) { memcpy(&feed->streams[i]->codec, &s.streams[i]->codec, sizeof(AVCodecContext)); } } c->buffer_ptr = c->buffer; } len = read(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr); if (len < 0) { if (errno != EAGAIN && errno != EINTR) { /* error : close connection */ goto fail; } } else if (len == 0) { /* end of connection : close it */ goto fail; } else { c->buffer_ptr += len; c->data_count += len; } return 0; fail: c->stream->feed_opened = 0; close(c->feed_fd); return -1; } | static int http_receive_data(HTTPContext *c) { int len; HTTPContext *c1; if (c->buffer_ptr >= c->buffer_end) { FFStream *feed = c->stream; if (c->data_count > FFM_PACKET_SIZE) { lseek(c->feed_fd, feed->feed_write_index, SEEK_SET); write(c->feed_fd, c->buffer, FFM_PACKET_SIZE); feed->feed_write_index += FFM_PACKET_SIZE; if (feed->feed_write_index > c->stream->feed_size) feed->feed_size = feed->feed_write_index; if (feed->feed_write_index >= c->stream->feed_max_size) feed->feed_write_index = FFM_PACKET_SIZE; ffm_write_write_index(c->feed_fd, feed->feed_write_index); for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) { if (c1->state == HTTPSTATE_WAIT_FEED && c1->stream->feed == c->stream->feed) { c1->state = HTTPSTATE_SEND_DATA; } } } else { AVFormatContext s; ByteIOContext *pb = &s.pb; int i; memset(&s, 0, sizeof(s)); url_open_buf(pb, c->buffer, c->buffer_end - c->buffer, URL_RDONLY); pb->buf_end = c->buffer_end; pb->is_streamed = 1; if (feed->fmt->read_header(&s, 0) < 0) { goto fail; } if (s.nb_streams != feed->nb_streams) { goto fail; } for (i = 0; i < s.nb_streams; i++) { memcpy(&feed->streams[i]->codec, &s.streams[i]->codec, sizeof(AVCodecContext)); } } c->buffer_ptr = c->buffer; } len = read(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr); if (len < 0) { if (errno != EAGAIN && errno != EINTR) { goto fail; } } else if (len == 0) { goto fail; } else { c->buffer_ptr += len; c->data_count += len; } return 0; fail: c->stream->feed_opened = 0; close(c->feed_fd); return -1; } | 1,220
1 | static int ms_adpcm_decode_block (ms_adpcm_data *msadpcm, uint8_t *encoded,
int16_t *decoded)
{
int i, outputLength, samplesRemaining;
int channelCount;
int16_t *coefficient[2];
ms_adpcm_state decoderState[2];
ms_adpcm_state *state[2];
/* Calculate the number of bytes needed for decoded data. */
outputLength = msadpcm->samplesPerBlock * sizeof (int16_t) *
msadpcm->track->f.channelCount;
channelCount = msadpcm->track->f.channelCount;
state[0] = &decoderState[0];
if (channelCount == 2)
state[1] = &decoderState[1];
else
state[1] = &decoderState[0];
/* Initialize predictor. */
for (i=0; i<channelCount; i++)
{
state[i]->predictor = *encoded++;
assert(state[i]->predictor < msadpcm->numCoefficients);
}
/* Initialize delta. */
for (i=0; i<channelCount; i++)
{
state[i]->delta = (encoded[1]<<8) | encoded[0];
encoded += sizeof (uint16_t);
}
/* Initialize first two samples. */
for (i=0; i<channelCount; i++)
{
state[i]->sample1 = (encoded[1]<<8) | encoded[0];
encoded += sizeof (uint16_t);
}
for (i=0; i<channelCount; i++)
{
state[i]->sample2 = (encoded[1]<<8) | encoded[0];
encoded += sizeof (uint16_t);
}
coefficient[0] = msadpcm->coefficients[state[0]->predictor];
coefficient[1] = msadpcm->coefficients[state[1]->predictor];
for (i=0; i<channelCount; i++)
*decoded++ = state[i]->sample2;
for (i=0; i<channelCount; i++)
*decoded++ = state[i]->sample1;
/*
The first two samples have already been 'decoded' in
the block header.
*/
samplesRemaining = (msadpcm->samplesPerBlock - 2) *
msadpcm->track->f.channelCount;
while (samplesRemaining > 0)
{
uint8_t code;
int16_t newSample;
code = *encoded >> 4;
newSample = ms_adpcm_decode_sample(state[0], code,
coefficient[0]);
*decoded++ = newSample;
code = *encoded & 0x0f;
newSample = ms_adpcm_decode_sample(state[1], code,
coefficient[1]);
*decoded++ = newSample;
encoded++;
samplesRemaining -= 2;
}
return outputLength;
} | static int ms_adpcm_decode_block (ms_adpcm_data *msadpcm, uint8_t *encoded,
int16_t *decoded)
{
int i, outputLength, samplesRemaining;
int channelCount;
int16_t *coefficient[2];
ms_adpcm_state decoderState[2];
ms_adpcm_state *state[2];
outputLength = msadpcm->samplesPerBlock * sizeof (int16_t) *
msadpcm->track->f.channelCount;
channelCount = msadpcm->track->f.channelCount;
state[0] = &decoderState[0];
if (channelCount == 2)
state[1] = &decoderState[1];
else
state[1] = &decoderState[0];
for (i=0; i<channelCount; i++)
{
state[i]->predictor = *encoded++;
assert(state[i]->predictor < msadpcm->numCoefficients);
}
for (i=0; i<channelCount; i++)
{
state[i]->delta = (encoded[1]<<8) | encoded[0];
encoded += sizeof (uint16_t);
}
for (i=0; i<channelCount; i++)
{
state[i]->sample1 = (encoded[1]<<8) | encoded[0];
encoded += sizeof (uint16_t);
}
for (i=0; i<channelCount; i++)
{
state[i]->sample2 = (encoded[1]<<8) | encoded[0];
encoded += sizeof (uint16_t);
}
coefficient[0] = msadpcm->coefficients[state[0]->predictor];
coefficient[1] = msadpcm->coefficients[state[1]->predictor];
for (i=0; i<channelCount; i++)
*decoded++ = state[i]->sample2;
for (i=0; i<channelCount; i++)
*decoded++ = state[i]->sample1;
samplesRemaining = (msadpcm->samplesPerBlock - 2) *
msadpcm->track->f.channelCount;
while (samplesRemaining > 0)
{
uint8_t code;
int16_t newSample;
code = *encoded >> 4;
newSample = ms_adpcm_decode_sample(state[0], code,
coefficient[0]);
*decoded++ = newSample;
code = *encoded & 0x0f;
newSample = ms_adpcm_decode_sample(state[1], code,
coefficient[1]);
*decoded++ = newSample;
encoded++;
samplesRemaining -= 2;
}
return outputLength;
} | 1,222 |
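The block-header parsing above reads its 16-bit fields with (encoded[1] << 8) | encoded[0], i.e. little-endian regardless of host byte order, and relies on assignment to int16_t for the signed reinterpretation. The same read as a self-contained helper with the conversion made explicit (illustration only, not libaudiofile API):

#include <stdint.h>
#include <stdio.h>

static int16_t read_s16_le(const uint8_t *p)
{
    uint16_t u = (uint16_t)((p[1] << 8) | p[0]);
    return (int16_t)u;    /* values >= 0x8000 map to negatives */
}

int main(void)
{
    const uint8_t buf[2] = { 0xFE, 0xFF };    /* little-endian -2 */
    printf("%d\n", read_s16_le(buf));         /* prints -2 */
    return 0;
}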
0 | static guint16 de_tp_rlc_sdu_counter_value(tvbuff_t *tvb, proto_tree *tree, packet_info *pinfo _U_, guint32 offset, guint len _U_, gchar *add_string _U_, int string_len _U_) {
    guint32 curr_offset = offset;
    proto_tree_add_item(tree, hf_gsm_a_dtap_ue_received_rlc_sdu_counter_value, tvb, curr_offset, 4, ENC_BIG_ENDIAN);
    curr_offset += 4;
    return (curr_offset - offset);
} | static guint16 de_tp_rlc_sdu_counter_value(tvbuff_t *tvb, proto_tree *tree, packet_info *pinfo _U_, guint32 offset, guint len _U_, gchar *add_string _U_, int string_len _U_) {
    guint32 curr_offset = offset;
    proto_tree_add_item(tree, hf_gsm_a_dtap_ue_received_rlc_sdu_counter_value, tvb, curr_offset, 4, ENC_BIG_ENDIAN);
    curr_offset += 4;
    return (curr_offset - offset);
} | 1,223
1 | static void ms_adpcm_reset1 (_AFmoduleinst *i)
{
ms_adpcm_data *d = (ms_adpcm_data *) i->modspec;
AFframecount nextTrackFrame;
int framesPerBlock;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
nextTrackFrame = d->track->nextfframe;
d->track->nextfframe = (nextTrackFrame / framesPerBlock) *
framesPerBlock;
d->framesToIgnore = nextTrackFrame - d->track->nextfframe;
/* postroll = frames2ignore */
} | static void ms_adpcm_reset1 (_AFmoduleinst *i)
{
ms_adpcm_data *d = (ms_adpcm_data *) i->modspec;
AFframecount nextTrackFrame;
int framesPerBlock;
framesPerBlock = d->samplesPerBlock / d->track->f.channelCount;
nextTrackFrame = d->track->nextfframe;
d->track->nextfframe = (nextTrackFrame / framesPerBlock) *
framesPerBlock;
d->framesToIgnore = nextTrackFrame - d->track->nextfframe;
} | 1,224 |
0 | cmsBool CMSEXPORT cmsAppendNamedColor(cmsNAMEDCOLORLIST* NamedColorList,
const char* Name,
cmsUInt16Number PCS[3], cmsUInt16Number Colorant[cmsMAXCHANNELS])
{
cmsUInt32Number i;
if (NamedColorList == NULL) return FALSE;
if (NamedColorList ->nColors + 1 > NamedColorList ->Allocated) {
if (!GrowNamedColorList(NamedColorList)) return FALSE;
}
for (i=0; i < NamedColorList ->ColorantCount; i++)
NamedColorList ->List[NamedColorList ->nColors].DeviceColorant[i] = Colorant == NULL? 0 : Colorant[i];
for (i=0; i < 3; i++)
NamedColorList ->List[NamedColorList ->nColors].PCS[i] = PCS == NULL ? 0 : PCS[i];
if (Name != NULL) {
strncpy(NamedColorList ->List[NamedColorList ->nColors].Name, Name, cmsMAX_PATH-1);
NamedColorList ->List[NamedColorList ->nColors].Name[cmsMAX_PATH-1] = 0;
}
else
NamedColorList ->List[NamedColorList ->nColors].Name[0] = 0;
NamedColorList ->nColors++;
return TRUE;
} | cmsBool CMSEXPORT cmsAppendNamedColor(cmsNAMEDCOLORLIST* NamedColorList,
const char* Name,
cmsUInt16Number PCS[3], cmsUInt16Number Colorant[cmsMAXCHANNELS])
{
cmsUInt32Number i;
if (NamedColorList == NULL) return FALSE;
if (NamedColorList ->nColors + 1 > NamedColorList ->Allocated) {
if (!GrowNamedColorList(NamedColorList)) return FALSE;
}
for (i=0; i < NamedColorList ->ColorantCount; i++)
NamedColorList ->List[NamedColorList ->nColors].DeviceColorant[i] = Colorant == NULL? 0 : Colorant[i];
for (i=0; i < 3; i++)
NamedColorList ->List[NamedColorList ->nColors].PCS[i] = PCS == NULL ? 0 : PCS[i];
if (Name != NULL) {
strncpy(NamedColorList ->List[NamedColorList ->nColors].Name, Name, cmsMAX_PATH-1);
NamedColorList ->List[NamedColorList ->nColors].Name[cmsMAX_PATH-1] = 0;
}
else
NamedColorList ->List[NamedColorList ->nColors].Name[0] = 0;
NamedColorList ->nColors++;
return TRUE;
} | 1,226 |
0 | TSReturnCode TSUrlSchemeSet(TSMBuffer bufp, TSMLoc obj, const char *value, int length) {
    return URLPartSet(bufp, obj, value, length, &URL::scheme_set);
} | TSReturnCode TSUrlSchemeSet(TSMBuffer bufp, TSMLoc obj, const char *value, int length) {
    return URLPartSet(bufp, obj, value, length, &URL::scheme_set);
} | 1,227
0 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
{
struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
int i, err = 0;
int free = -1;
mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
mutex_lock(&table->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
if (free < 0 && !table->refs[i]) {
free = i;
continue;
}
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
/* MAC already registered, increase reference count */
*index = i;
++table->refs[i];
goto out;
}
}
if (free < 0) {
err = -ENOMEM;
goto out;
}
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
/* No free mac entries */
err = -ENOSPC;
goto out;
}
/* Register new MAC */
table->refs[free] = 1;
table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
table->refs[free] = 0;
table->entries[free] = 0;
goto out;
}
*index = free;
++table->total;
out:
mutex_unlock(&table->mutex);
return err;
} | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
{
struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
int i, err = 0;
int free = -1;
mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
mutex_lock(&table->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
if (free < 0 && !table->refs[i]) {
free = i;
continue;
}
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
*index = i;
++table->refs[i];
goto out;
}
}
if (free < 0) {
err = -ENOMEM;
goto out;
}
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
err = -ENOSPC;
goto out;
}
table->refs[free] = 1;
table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
table->refs[free] = 0;
table->entries[free] = 0;
goto out;
}
*index = free;
++table->total;
out:
mutex_unlock(&table->mutex);
return err;
} | 1,228 |
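mlx4_register_mac above is a compact example of the scan-with-memo pattern: remember the first free slot but keep scanning the whole table, so a duplicate registration shares the existing entry by bumping its refcount instead of consuming a second slot. A generic standalone version with invented types (not mlx4 code):

#include <stdio.h>

#define TABLE_SIZE 8

struct entry { unsigned long long key; int refs; };

static int register_key(struct entry *tab, unsigned long long key)
{
    int i, free_slot = -1;
    for (i = 0; i < TABLE_SIZE; i++) {
        if (free_slot < 0 && tab[i].refs == 0) {
            free_slot = i;        /* remember, but keep scanning for a duplicate */
            continue;
        }
        if (tab[i].refs && tab[i].key == key) {
            tab[i].refs++;        /* already registered: share the entry */
            return i;
        }
    }
    if (free_slot < 0)
        return -1;                /* table full */
    tab[free_slot].key = key;
    tab[free_slot].refs = 1;
    return free_slot;
}

int main(void)
{
    struct entry tab[TABLE_SIZE] = { { 0 } };
    printf("%d %d\n", register_key(tab, 42), register_key(tab, 42));  /* prints "0 0" */
    return 0;
}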
1 | static status ParseFormat (AFfilehandle filehandle, AFvirtualfile *fp,
uint32_t id, size_t size)
{
_Track *track;
uint16_t formatTag, channelCount;
uint32_t sampleRate, averageBytesPerSecond;
uint16_t blockAlign;
_WAVEInfo *wave;
assert(filehandle != NULL);
assert(fp != NULL);
assert(!memcmp(&id, "fmt ", 4));
track = _af_filehandle_get_track(filehandle, AF_DEFAULT_TRACK);
assert(filehandle->formatSpecific != NULL);
wave = (_WAVEInfo *) filehandle->formatSpecific;
af_read_uint16_le(&formatTag, fp);
af_read_uint16_le(&channelCount, fp);
af_read_uint32_le(&sampleRate, fp);
af_read_uint32_le(&averageBytesPerSecond, fp);
af_read_uint16_le(&blockAlign, fp);
track->f.channelCount = channelCount;
track->f.sampleRate = sampleRate;
track->f.byteOrder = AF_BYTEORDER_LITTLEENDIAN;
/* Default to uncompressed audio data. */
track->f.compressionType = AF_COMPRESSION_NONE;
switch (formatTag)
{
case WAVE_FORMAT_PCM:
{
uint16_t bitsPerSample;
af_read_uint16_le(&bitsPerSample, fp);
track->f.sampleWidth = bitsPerSample;
if (bitsPerSample == 0 || bitsPerSample > 32)
{
_af_error(AF_BAD_WIDTH,
"bad sample width of %d bits",
bitsPerSample);
return AF_FAIL;
}
if (bitsPerSample <= 8)
track->f.sampleFormat = AF_SAMPFMT_UNSIGNED;
else
track->f.sampleFormat = AF_SAMPFMT_TWOSCOMP;
}
break;
case WAVE_FORMAT_MULAW:
case IBM_FORMAT_MULAW:
track->f.sampleWidth = 16;
track->f.sampleFormat = AF_SAMPFMT_TWOSCOMP;
track->f.compressionType = AF_COMPRESSION_G711_ULAW;
break;
case WAVE_FORMAT_ALAW:
case IBM_FORMAT_ALAW:
track->f.sampleWidth = 16;
track->f.sampleFormat = AF_SAMPFMT_TWOSCOMP;
track->f.compressionType = AF_COMPRESSION_G711_ALAW;
break;
case WAVE_FORMAT_IEEE_FLOAT:
{
uint16_t bitsPerSample;
af_read_uint16_le(&bitsPerSample, fp);
if (bitsPerSample == 64)
{
track->f.sampleWidth = 64;
track->f.sampleFormat = AF_SAMPFMT_DOUBLE;
}
else
{
track->f.sampleWidth = 32;
track->f.sampleFormat = AF_SAMPFMT_FLOAT;
}
}
break;
case WAVE_FORMAT_ADPCM:
{
uint16_t bitsPerSample, extraByteCount,
samplesPerBlock, numCoefficients;
int i;
AUpvlist pv;
long l;
void *v;
if (track->f.channelCount != 1 &&
track->f.channelCount != 2)
{
_af_error(AF_BAD_CHANNELS,
"WAVE file with MS ADPCM compression "
"must have 1 or 2 channels");
}
af_read_uint16_le(&bitsPerSample, fp);
af_read_uint16_le(&extraByteCount, fp);
af_read_uint16_le(&samplesPerBlock, fp);
af_read_uint16_le(&numCoefficients, fp);
/* numCoefficients should be at least 7. */
assert(numCoefficients >= 7 && numCoefficients <= 255);
for (i=0; i<numCoefficients; i++)
{
int16_t a0, a1;
af_fread(&a0, 1, 2, fp);
af_fread(&a1, 1, 2, fp);
a0 = LENDIAN_TO_HOST_INT16(a0);
a1 = LENDIAN_TO_HOST_INT16(a1);
wave->msadpcmCoefficients[i][0] = a0;
wave->msadpcmCoefficients[i][1] = a1;
}
track->f.sampleWidth = 16;
track->f.sampleFormat = AF_SAMPFMT_TWOSCOMP;
track->f.compressionType = AF_COMPRESSION_MS_ADPCM;
track->f.byteOrder = _AF_BYTEORDER_NATIVE;
/* Create the parameter list. */
pv = AUpvnew(4);
AUpvsetparam(pv, 0, _AF_MS_ADPCM_NUM_COEFFICIENTS);
AUpvsetvaltype(pv, 0, AU_PVTYPE_LONG);
l = numCoefficients;
AUpvsetval(pv, 0, &l);
AUpvsetparam(pv, 1, _AF_MS_ADPCM_COEFFICIENTS);
AUpvsetvaltype(pv, 1, AU_PVTYPE_PTR);
v = wave->msadpcmCoefficients;
AUpvsetval(pv, 1, &v);
AUpvsetparam(pv, 2, _AF_SAMPLES_PER_BLOCK);
AUpvsetvaltype(pv, 2, AU_PVTYPE_LONG);
l = samplesPerBlock;
AUpvsetval(pv, 2, &l);
AUpvsetparam(pv, 3, _AF_BLOCK_SIZE);
AUpvsetvaltype(pv, 3, AU_PVTYPE_LONG);
l = blockAlign;
AUpvsetval(pv, 3, &l);
track->f.compressionParams = pv;
}
break;
case WAVE_FORMAT_DVI_ADPCM:
{
AUpvlist pv;
long l;
uint16_t bitsPerSample, extraByteCount,
samplesPerBlock;
af_read_uint16_le(&bitsPerSample, fp);
af_read_uint16_le(&extraByteCount, fp);
af_read_uint16_le(&samplesPerBlock, fp);
track->f.sampleWidth = 16;
track->f.sampleFormat = AF_SAMPFMT_TWOSCOMP;
track->f.compressionType = AF_COMPRESSION_IMA;
track->f.byteOrder = _AF_BYTEORDER_NATIVE;
/* Create the parameter list. */
pv = AUpvnew(2);
AUpvsetparam(pv, 0, _AF_SAMPLES_PER_BLOCK);
AUpvsetvaltype(pv, 0, AU_PVTYPE_LONG);
l = samplesPerBlock;
AUpvsetval(pv, 0, &l);
AUpvsetparam(pv, 1, _AF_BLOCK_SIZE);
AUpvsetvaltype(pv, 1, AU_PVTYPE_LONG);
l = blockAlign;
AUpvsetval(pv, 1, &l);
track->f.compressionParams = pv;
}
break;
case WAVE_FORMAT_YAMAHA_ADPCM:
case WAVE_FORMAT_OKI_ADPCM:
case WAVE_FORMAT_CREATIVE_ADPCM:
case IBM_FORMAT_ADPCM:
_af_error(AF_BAD_NOT_IMPLEMENTED, "WAVE ADPCM data format 0x%x is not currently supported", formatTag);
return AF_FAIL;
break;
case WAVE_FORMAT_MPEG:
_af_error(AF_BAD_NOT_IMPLEMENTED, "WAVE MPEG data format is not supported");
return AF_FAIL;
break;
case WAVE_FORMAT_MPEGLAYER3:
_af_error(AF_BAD_NOT_IMPLEMENTED, "WAVE MPEG layer 3 data format is not supported");
return AF_FAIL;
break;
default:
_af_error(AF_BAD_NOT_IMPLEMENTED, "WAVE file data format 0x%x not currently supported", formatTag);
return AF_FAIL;
break;
}
_af_set_sample_format(&track->f, track->f.sampleFormat, track->f.sampleWidth);
return AF_SUCCEED;
} | static status ParseFormat (AFfilehandle filehandle, AFvirtualfile *fp,
uint32_t id, size_t size)
{
_Track *track;
uint16_t formatTag, channelCount;
uint32_t sampleRate, averageBytesPerSecond;
uint16_t blockAlign;
_WAVEInfo *wave;
assert(filehandle != NULL);
assert(fp != NULL);
assert(!memcmp(&id, "fmt ", 4));
track = _af_filehandle_get_track(filehandle, AF_DEFAULT_TRACK);
assert(filehandle->formatSpecific != NULL);
wave = (_WAVEInfo *) filehandle->formatSpecific;
af_read_uint16_le(&formatTag, fp);
af_read_uint16_le(&channelCount, fp);
af_read_uint32_le(&sampleRate, fp);
af_read_uint32_le(&averageBytesPerSecond, fp);
af_read_uint16_le(&blockAlign, fp);
track->f.channelCount = channelCount;
track->f.sampleRate = sampleRate;
track->f.byteOrder = AF_BYTEORDER_LITTLEENDIAN;
track->f.compressionType = AF_COMPRESSION_NONE;
switch (formatTag)
{
case WAVE_FORMAT_PCM:
{
uint16_t bitsPerSample;
af_read_uint16_le(&bitsPerSample, fp);
track->f.sampleWidth = bitsPerSample;
if (bitsPerSample == 0 || bitsPerSample > 32)
{
_af_error(AF_BAD_WIDTH,
"bad sample width of %d bits",
bitsPerSample);
return AF_FAIL;
}
if (bitsPerSample <= 8)
track->f.sampleFormat = AF_SAMPFMT_UNSIGNED;
else
track->f.sampleFormat = AF_SAMPFMT_TWOSCOMP;
}
break;
case WAVE_FORMAT_MULAW:
case IBM_FORMAT_MULAW:
track->f.sampleWidth = 16;
track->f.sampleFormat = AF_SAMPFMT_TWOSCOMP;
track->f.compressionType = AF_COMPRESSION_G711_ULAW;
break;
case WAVE_FORMAT_ALAW:
case IBM_FORMAT_ALAW:
track->f.sampleWidth = 16;
track->f.sampleFormat = AF_SAMPFMT_TWOSCOMP;
track->f.compressionType = AF_COMPRESSION_G711_ALAW;
break;
case WAVE_FORMAT_IEEE_FLOAT:
{
uint16_t bitsPerSample;
af_read_uint16_le(&bitsPerSample, fp);
if (bitsPerSample == 64)
{
track->f.sampleWidth = 64;
track->f.sampleFormat = AF_SAMPFMT_DOUBLE;
}
else
{
track->f.sampleWidth = 32;
track->f.sampleFormat = AF_SAMPFMT_FLOAT;
}
}
break;
case WAVE_FORMAT_ADPCM:
{
uint16_t bitsPerSample, extraByteCount,
samplesPerBlock, numCoefficients;
int i;
AUpvlist pv;
long l;
void *v;
if (track->f.channelCount != 1 &&
track->f.channelCount != 2)
{
_af_error(AF_BAD_CHANNELS,
"WAVE file with MS ADPCM compression "
"must have 1 or 2 channels");
}
af_read_uint16_le(&bitsPerSample, fp);
af_read_uint16_le(&extraByteCount, fp);
af_read_uint16_le(&samplesPerBlock, fp);
af_read_uint16_le(&numCoefficients, fp);
assert(numCoefficients >= 7 && numCoefficients <= 255);
for (i=0; i<numCoefficients; i++)
{
int16_t a0, a1;
af_fread(&a0, 1, 2, fp);
af_fread(&a1, 1, 2, fp);
a0 = LENDIAN_TO_HOST_INT16(a0);
a1 = LENDIAN_TO_HOST_INT16(a1);
wave->msadpcmCoefficients[i][0] = a0;
wave->msadpcmCoefficients[i][1] = a1;
}
track->f.sampleWidth = 16;
track->f.sampleFormat = AF_SAMPFMT_TWOSCOMP;
track->f.compressionType = AF_COMPRESSION_MS_ADPCM;
track->f.byteOrder = _AF_BYTEORDER_NATIVE;
pv = AUpvnew(4);
AUpvsetparam(pv, 0, _AF_MS_ADPCM_NUM_COEFFICIENTS);
AUpvsetvaltype(pv, 0, AU_PVTYPE_LONG);
l = numCoefficients;
AUpvsetval(pv, 0, &l);
AUpvsetparam(pv, 1, _AF_MS_ADPCM_COEFFICIENTS);
AUpvsetvaltype(pv, 1, AU_PVTYPE_PTR);
v = wave->msadpcmCoefficients;
AUpvsetval(pv, 1, &v);
AUpvsetparam(pv, 2, _AF_SAMPLES_PER_BLOCK);
AUpvsetvaltype(pv, 2, AU_PVTYPE_LONG);
l = samplesPerBlock;
AUpvsetval(pv, 2, &l);
AUpvsetparam(pv, 3, _AF_BLOCK_SIZE);
AUpvsetvaltype(pv, 3, AU_PVTYPE_LONG);
l = blockAlign;
AUpvsetval(pv, 3, &l);
track->f.compressionParams = pv;
}
break;
case WAVE_FORMAT_DVI_ADPCM:
{
AUpvlist pv;
long l;
uint16_t bitsPerSample, extraByteCount,
samplesPerBlock;
af_read_uint16_le(&bitsPerSample, fp);
af_read_uint16_le(&extraByteCount, fp);
af_read_uint16_le(&samplesPerBlock, fp);
track->f.sampleWidth = 16;
track->f.sampleFormat = AF_SAMPFMT_TWOSCOMP;
track->f.compressionType = AF_COMPRESSION_IMA;
track->f.byteOrder = _AF_BYTEORDER_NATIVE;
pv = AUpvnew(2);
AUpvsetparam(pv, 0, _AF_SAMPLES_PER_BLOCK);
AUpvsetvaltype(pv, 0, AU_PVTYPE_LONG);
l = samplesPerBlock;
AUpvsetval(pv, 0, &l);
AUpvsetparam(pv, 1, _AF_BLOCK_SIZE);
AUpvsetvaltype(pv, 1, AU_PVTYPE_LONG);
l = blockAlign;
AUpvsetval(pv, 1, &l);
track->f.compressionParams = pv;
}
break;
case WAVE_FORMAT_YAMAHA_ADPCM:
case WAVE_FORMAT_OKI_ADPCM:
case WAVE_FORMAT_CREATIVE_ADPCM:
case IBM_FORMAT_ADPCM:
_af_error(AF_BAD_NOT_IMPLEMENTED, "WAVE ADPCM data format 0x%x is not currently supported", formatTag);
return AF_FAIL;
break;
case WAVE_FORMAT_MPEG:
_af_error(AF_BAD_NOT_IMPLEMENTED, "WAVE MPEG data format is not supported");
return AF_FAIL;
break;
case WAVE_FORMAT_MPEGLAYER3:
_af_error(AF_BAD_NOT_IMPLEMENTED, "WAVE MPEG layer 3 data format is not supported");
return AF_FAIL;
break;
default:
_af_error(AF_BAD_NOT_IMPLEMENTED, "WAVE file data format 0x%x not currently supported", formatTag);
return AF_FAIL;
break;
}
_af_set_sample_format(&track->f, track->f.sampleFormat, track->f.sampleWidth);
return AF_SUCCEED;
} | 1,230 |
0 | void psf_init_files(SF_PRIVATE *psf) {
    psf->file.filedes = -1;
    psf->rsrc.filedes = -1;
    psf->file.savedes = -1;
} | void psf_init_files(SF_PRIVATE *psf) {
    psf->file.filedes = -1;
    psf->rsrc.filedes = -1;
    psf->file.savedes = -1;
} | 1,232
1 | long keyctl_join_session_keyring(const char __user *_name)
{
char *name;
long ret;
/* fetch the name from userspace */
name = NULL;
if (_name) {
name = strndup_user(_name, PAGE_SIZE);
if (IS_ERR(name)) {
ret = PTR_ERR(name);
goto error;
}
}
/* join the session */
ret = join_session_keyring(name);
error:
return ret;
} /* end keyctl_join_session_keyring() */ | long keyctl_join_session_keyring(const char __user *_name)
{
char *name;
long ret;
name = NULL;
if (_name) {
name = strndup_user(_name, PAGE_SIZE);
if (IS_ERR(name)) {
ret = PTR_ERR(name);
goto error;
}
}
ret = join_session_keyring(name);
error:
return ret;
} | 1,233 |
1 | QByteArray Cipher::blowfishECB(QByteArray cipherText, bool direction)
{
QCA::Initializer init;
QByteArray temp = cipherText;
//do padding ourselves
if (direction)
{
while ((temp.length() % 8) != 0) temp.append('\0');
}
else
{
temp = b64ToByte(temp);
while ((temp.length() % 8) != 0) temp.append('\0');
}
QCA::Direction dir = (direction) ? QCA::Encode : QCA::Decode;
QCA::Cipher cipher(m_type, QCA::Cipher::ECB, QCA::Cipher::NoPadding, dir, m_key);
QByteArray temp2 = cipher.update(QCA::MemoryRegion(temp)).toByteArray();
temp2 += cipher.final().toByteArray();
if (!cipher.ok())
return cipherText;
if (direction)
temp2 = byteToB64(temp2);
return temp2;
} | QByteArray Cipher::blowfishECB(QByteArray cipherText, bool direction)
{
QCA::Initializer init;
QByteArray temp = cipherText;
if (direction)
{
while ((temp.length() % 8) != 0) temp.append('\0');
}
else
{
temp = b64ToByte(temp);
while ((temp.length() % 8) != 0) temp.append('\0');
}
QCA::Direction dir = (direction) ? QCA::Encode : QCA::Decode;
QCA::Cipher cipher(m_type, QCA::Cipher::ECB, QCA::Cipher::NoPadding, dir, m_key);
QByteArray temp2 = cipher.update(QCA::MemoryRegion(temp)).toByteArray();
temp2 += cipher.final().toByteArray();
if (!cipher.ok())
return cipherText;
if (direction)
temp2 = byteToB64(temp2);
return temp2;
} | 1,234 |
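blowfishECB above pads its input with '\0' bytes up to the 8-byte Blowfish block size before running ECB, and exact multiples get no padding (unlike PKCS#7), so trailing NULs are ambiguous on decode. The rounding rule in isolation, as a standalone sketch (illustration; a real design should prefer an authenticated mode over ECB):

#include <stdio.h>
#include <string.h>

#define BLOCK 8

static size_t padded_len(size_t n)
{
    return (n + BLOCK - 1) / BLOCK * BLOCK;    /* round up to a whole block */
}

int main(void)
{
    const char *msg = "hello";                 /* 5 bytes -> 8 after padding */
    unsigned char buf[64] = { 0 };             /* zero fill supplies the '\0' pad */
    memcpy(buf, msg, strlen(msg));
    printf("%zu -> %zu\n", strlen(msg), padded_len(strlen(msg)));
    return 0;
}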
1 | static void reschedule_dma(void *opaque) { DMAAIOCB *dbs = (DMAAIOCB *)opaque; qemu_bh_delete(dbs->bh); dbs->bh = NULL; dma_bdrv_cb(opaque, 0); } | static void reschedule_dma(void *opaque) { DMAAIOCB *dbs = (DMAAIOCB *)opaque; qemu_bh_delete(dbs->bh); dbs->bh = NULL; dma_bdrv_cb(opaque, 0); } | 1,235 |
0 | static int dissect_printerdata_data(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_, guint32 type) {
    proto_item *item, *hidden_item;
    proto_tree *subtree;
    guint32 size;
    subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_printerdata_data, &item, "Data");
    offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_printerdata_size, &size);
    if (size) {
        offset = dissect_ndr_uint8s(tvb, offset, pinfo, subtree, di, drep, hf_printerdata_data, size, NULL);
        switch (type) {
        case DCERPC_REG_SZ: {
            char *data = tvb_get_string_enc(NULL, tvb, offset - size, size, ENC_UTF_16 | ENC_LITTLE_ENDIAN);
            proto_item_append_text(item, ": %s", data);
            col_append_fstr(pinfo->cinfo, COL_INFO, " = %s", data);
            hidden_item = proto_tree_add_string(tree, hf_printerdata_data_sz, tvb, offset - size, size, data);
            PROTO_ITEM_SET_HIDDEN(hidden_item);
            g_free(data);
            break;
        }
        case DCERPC_REG_DWORD: {
            guint32 data = tvb_get_letohl(tvb, offset - size);
            proto_item_append_text(item, ": 0x%08x", data);
            col_append_fstr(pinfo->cinfo, COL_INFO, " = 0x%08x", data);
            hidden_item = proto_tree_add_uint(tree, hf_printerdata_data_dword, tvb, offset - size, 4, data);
            PROTO_ITEM_SET_HIDDEN(hidden_item);
            break;
        }
        case DCERPC_REG_BINARY:
            col_append_str(pinfo->cinfo, COL_INFO, " = <binary data>");
            break;
        default:
            break;
        }
    }
    proto_item_set_len(item, size + 4);
    return offset;
} | static int dissect_printerdata_data(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_, guint32 type) {
    proto_item *item, *hidden_item;
    proto_tree *subtree;
    guint32 size;
    subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_printerdata_data, &item, "Data");
    offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_printerdata_size, &size);
    if (size) {
        offset = dissect_ndr_uint8s(tvb, offset, pinfo, subtree, di, drep, hf_printerdata_data, size, NULL);
        switch (type) {
        case DCERPC_REG_SZ: {
            char *data = tvb_get_string_enc(NULL, tvb, offset - size, size, ENC_UTF_16 | ENC_LITTLE_ENDIAN);
            proto_item_append_text(item, ": %s", data);
            col_append_fstr(pinfo->cinfo, COL_INFO, " = %s", data);
            hidden_item = proto_tree_add_string(tree, hf_printerdata_data_sz, tvb, offset - size, size, data);
            PROTO_ITEM_SET_HIDDEN(hidden_item);
            g_free(data);
            break;
        }
        case DCERPC_REG_DWORD: {
            guint32 data = tvb_get_letohl(tvb, offset - size);
            proto_item_append_text(item, ": 0x%08x", data);
            col_append_fstr(pinfo->cinfo, COL_INFO, " = 0x%08x", data);
            hidden_item = proto_tree_add_uint(tree, hf_printerdata_data_dword, tvb, offset - size, 4, data);
            PROTO_ITEM_SET_HIDDEN(hidden_item);
            break;
        }
        case DCERPC_REG_BINARY:
            col_append_str(pinfo->cinfo, COL_INFO, " = <binary data>");
            break;
        default:
            break;
        }
    }
    proto_item_set_len(item, size + 4);
    return offset;
} | 1,236
1 | int sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
union {
int val;
struct linger ling;
struct timeval tm;
} v;
unsigned int lv = sizeof(int);
int len;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch(optname) {
case SO_DEBUG:
v.val = sock_flag(sk, SOCK_DBG);
break;
case SO_DONTROUTE:
v.val = sock_flag(sk, SOCK_LOCALROUTE);
break;
case SO_BROADCAST:
v.val = !!sock_flag(sk, SOCK_BROADCAST);
break;
case SO_SNDBUF:
v.val = sk->sk_sndbuf;
break;
case SO_RCVBUF:
v.val = sk->sk_rcvbuf;
break;
case SO_REUSEADDR:
v.val = sk->sk_reuse;
break;
case SO_KEEPALIVE:
v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
break;
case SO_TYPE:
v.val = sk->sk_type;
break;
case SO_ERROR:
v.val = -sock_error(sk);
if (v.val==0)
v.val = xchg(&sk->sk_err_soft, 0);
break;
case SO_OOBINLINE:
v.val = !!sock_flag(sk, SOCK_URGINLINE);
break;
case SO_NO_CHECK:
v.val = sk->sk_no_check;
break;
case SO_PRIORITY:
v.val = sk->sk_priority;
break;
case SO_LINGER:
lv = sizeof(v.ling);
v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
v.ling.l_linger = sk->sk_lingertime / HZ;
break;
case SO_BSDCOMPAT:
sock_warn_obsolete_bsdism("getsockopt");
break;
case SO_TIMESTAMP:
v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
!sock_flag(sk, SOCK_RCVTSTAMPNS);
break;
case SO_TIMESTAMPNS:
v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
break;
case SO_RCVTIMEO:
lv=sizeof(struct timeval);
if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
}
break;
case SO_SNDTIMEO:
lv=sizeof(struct timeval);
if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_sndtimeo / HZ;
v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
}
break;
case SO_RCVLOWAT:
v.val = sk->sk_rcvlowat;
break;
case SO_SNDLOWAT:
v.val=1;
break;
case SO_PASSCRED:
v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
break;
case SO_PEERCRED:
if (len > sizeof(sk->sk_peercred))
len = sizeof(sk->sk_peercred);
if (copy_to_user(optval, &sk->sk_peercred, len))
return -EFAULT;
goto lenout;
case SO_PEERNAME:
{
char address[128];
if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
return -ENOTCONN;
if (lv < len)
return -EINVAL;
if (copy_to_user(optval, address, len))
return -EFAULT;
goto lenout;
}
/* Dubious BSD thing... Probably nobody even uses it, but
* the UNIX standard wants it for whatever reason... -DaveM
*/
case SO_ACCEPTCONN:
v.val = sk->sk_state == TCP_LISTEN;
break;
case SO_PASSSEC:
v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
break;
case SO_PEERSEC:
return security_socket_getpeersec_stream(sock, optval, optlen, len);
case SO_MARK:
v.val = sk->sk_mark;
break;
default:
return -ENOPROTOOPT;
}
if (len > lv)
len = lv;
if (copy_to_user(optval, &v, len))
return -EFAULT;
lenout:
if (put_user(len, optlen))
return -EFAULT;
return 0;
} | int sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
union {
int val;
struct linger ling;
struct timeval tm;
} v;
unsigned int lv = sizeof(int);
int len;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch(optname) {
case SO_DEBUG:
v.val = sock_flag(sk, SOCK_DBG);
break;
case SO_DONTROUTE:
v.val = sock_flag(sk, SOCK_LOCALROUTE);
break;
case SO_BROADCAST:
v.val = !!sock_flag(sk, SOCK_BROADCAST);
break;
case SO_SNDBUF:
v.val = sk->sk_sndbuf;
break;
case SO_RCVBUF:
v.val = sk->sk_rcvbuf;
break;
case SO_REUSEADDR:
v.val = sk->sk_reuse;
break;
case SO_KEEPALIVE:
v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
break;
case SO_TYPE:
v.val = sk->sk_type;
break;
case SO_ERROR:
v.val = -sock_error(sk);
if (v.val==0)
v.val = xchg(&sk->sk_err_soft, 0);
break;
case SO_OOBINLINE:
v.val = !!sock_flag(sk, SOCK_URGINLINE);
break;
case SO_NO_CHECK:
v.val = sk->sk_no_check;
break;
case SO_PRIORITY:
v.val = sk->sk_priority;
break;
case SO_LINGER:
lv = sizeof(v.ling);
v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
v.ling.l_linger = sk->sk_lingertime / HZ;
break;
case SO_BSDCOMPAT:
sock_warn_obsolete_bsdism("getsockopt");
break;
case SO_TIMESTAMP:
v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
!sock_flag(sk, SOCK_RCVTSTAMPNS);
break;
case SO_TIMESTAMPNS:
v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
break;
case SO_RCVTIMEO:
lv=sizeof(struct timeval);
if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
}
break;
case SO_SNDTIMEO:
lv=sizeof(struct timeval);
if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_sndtimeo / HZ;
v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
}
break;
case SO_RCVLOWAT:
v.val = sk->sk_rcvlowat;
break;
case SO_SNDLOWAT:
v.val=1;
break;
case SO_PASSCRED:
v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
break;
case SO_PEERCRED:
if (len > sizeof(sk->sk_peercred))
len = sizeof(sk->sk_peercred);
if (copy_to_user(optval, &sk->sk_peercred, len))
return -EFAULT;
goto lenout;
case SO_PEERNAME:
{
char address[128];
if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
return -ENOTCONN;
if (lv < len)
return -EINVAL;
if (copy_to_user(optval, address, len))
return -EFAULT;
goto lenout;
}
case SO_ACCEPTCONN:
v.val = sk->sk_state == TCP_LISTEN;
break;
case SO_PASSSEC:
v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
break;
case SO_PEERSEC:
return security_socket_getpeersec_stream(sock, optval, optlen, len);
case SO_MARK:
v.val = sk->sk_mark;
break;
default:
return -ENOPROTOOPT;
}
if (len > lv)
len = lv;
if (copy_to_user(optval, &v, len))
return -EFAULT;
lenout:
if (put_user(len, optlen))
return -EFAULT;
return 0;
} | 1,237 |
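The two sock_getsockopt copies in this set differ by a single statement: the later one clears v.val before the switch. The hazard is visible in the structure above: v is a stack union copied out at up to the full option length, so any case that writes fewer bytes than are copied sends stale stack contents to userspace. A userspace sketch of the pattern and the scrub that closes it (invented names):

#include <stdio.h>
#include <string.h>

union opt {
    int val;
    struct { long a, b; } tm;
};

static void get_opt(char *out, size_t len)
{
    union opt v;
    memset(&v, 0, sizeof(v));    /* the fix: scrub before partial writes */
    v.val = 1;                   /* this arm fills only 4 of sizeof(v) bytes */
    if (len > sizeof(v))
        len = sizeof(v);
    memcpy(out, &v, len);        /* safe: untouched bytes are zero, not stale */
}

int main(void)
{
    char buf[16];
    int first;
    get_opt(buf, sizeof(buf));
    memcpy(&first, buf, sizeof(first));
    printf("%d\n", first);
    return 0;
}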
0 | static void update_mbgraph_frame_stats(VP9_COMP *cpi, MBGRAPH_FRAME_STATS *stats, YV12_BUFFER_CONFIG *buf, YV12_BUFFER_CONFIG *golden_ref, YV12_BUFFER_CONFIG *alt_ref) {
    MACROBLOCK *const x = &cpi->mb;
    MACROBLOCKD *const xd = &x->e_mbd;
    VP9_COMMON *const cm = &cpi->common;
    int mb_col, mb_row, offset = 0;
    int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
    MV gld_top_mv = { 0, 0 };
    MODE_INFO mi_local;
    vp9_zero(mi_local);
    x->mv_row_min = -BORDER_MV_PIXELS_B16;
    x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16;
    xd->up_available = 0;
    xd->plane[0].dst.stride = buf->y_stride;
    xd->plane[0].pre[0].stride = buf->y_stride;
    xd->plane[1].dst.stride = buf->uv_stride;
    xd->mi[0].src_mi = &mi_local;
    mi_local.mbmi.sb_type = BLOCK_16X16;
    mi_local.mbmi.ref_frame[0] = LAST_FRAME;
    mi_local.mbmi.ref_frame[1] = NONE;
    for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
        MV gld_left_mv = gld_top_mv;
        int mb_y_in_offset = mb_y_offset;
        int arf_y_in_offset = arf_y_offset;
        int gld_y_in_offset = gld_y_offset;
        x->mv_col_min = -BORDER_MV_PIXELS_B16;
        x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16;
        xd->left_available = 0;
        for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
            MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];
            update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset, golden_ref, &gld_left_mv, alt_ref, mb_row, mb_col);
            gld_left_mv = mb_stats->ref[GOLDEN_FRAME].m.mv.as_mv;
            if (mb_col == 0) {
                gld_top_mv = gld_left_mv;
            }
            xd->left_available = 1;
            mb_y_in_offset += 16;
            gld_y_in_offset += 16;
            arf_y_in_offset += 16;
            x->mv_col_min -= 16;
            x->mv_col_max -= 16;
        }
        xd->up_available = 1;
        mb_y_offset += buf->y_stride * 16;
        gld_y_offset += golden_ref->y_stride * 16;
        if (alt_ref) arf_y_offset += alt_ref->y_stride * 16;
        x->mv_row_min -= 16;
        x->mv_row_max -= 16;
        offset += cm->mb_cols;
    }
} | static void update_mbgraph_frame_stats(VP9_COMP *cpi, MBGRAPH_FRAME_STATS *stats, YV12_BUFFER_CONFIG *buf, YV12_BUFFER_CONFIG *golden_ref, YV12_BUFFER_CONFIG *alt_ref) {
    MACROBLOCK *const x = &cpi->mb;
    MACROBLOCKD *const xd = &x->e_mbd;
    VP9_COMMON *const cm = &cpi->common;
    int mb_col, mb_row, offset = 0;
    int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
    MV gld_top_mv = { 0, 0 };
    MODE_INFO mi_local;
    vp9_zero(mi_local);
    x->mv_row_min = -BORDER_MV_PIXELS_B16;
    x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16;
    xd->up_available = 0;
    xd->plane[0].dst.stride = buf->y_stride;
    xd->plane[0].pre[0].stride = buf->y_stride;
    xd->plane[1].dst.stride = buf->uv_stride;
    xd->mi[0].src_mi = &mi_local;
    mi_local.mbmi.sb_type = BLOCK_16X16;
    mi_local.mbmi.ref_frame[0] = LAST_FRAME;
    mi_local.mbmi.ref_frame[1] = NONE;
    for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
        MV gld_left_mv = gld_top_mv;
        int mb_y_in_offset = mb_y_offset;
        int arf_y_in_offset = arf_y_offset;
        int gld_y_in_offset = gld_y_offset;
        x->mv_col_min = -BORDER_MV_PIXELS_B16;
        x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16;
        xd->left_available = 0;
        for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
            MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];
            update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset, golden_ref, &gld_left_mv, alt_ref, mb_row, mb_col);
            gld_left_mv = mb_stats->ref[GOLDEN_FRAME].m.mv.as_mv;
            if (mb_col == 0) {
                gld_top_mv = gld_left_mv;
            }
            xd->left_available = 1;
            mb_y_in_offset += 16;
            gld_y_in_offset += 16;
            arf_y_in_offset += 16;
            x->mv_col_min -= 16;
            x->mv_col_max -= 16;
        }
        xd->up_available = 1;
        mb_y_offset += buf->y_stride * 16;
        gld_y_offset += golden_ref->y_stride * 16;
        if (alt_ref) arf_y_offset += alt_ref->y_stride * 16;
        x->mv_row_min -= 16;
        x->mv_row_max -= 16;
        offset += cm->mb_cols;
    }
} | 1,238
0 | SYSCALL_DEFINE1(inotify_init1, int, flags)
{
struct fsnotify_group *group;
struct user_struct *user;
int ret;
/* Check the IN_* constants for consistency. */
BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
return -EINVAL;
user = get_current_user();
if (unlikely(atomic_read(&user->inotify_devs) >=
inotify_max_user_instances)) {
ret = -EMFILE;
goto out_free_uid;
}
/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
group = inotify_new_group(user, inotify_max_queued_events);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_free_uid;
}
atomic_inc(&user->inotify_devs);
ret = anon_inode_getfd("inotify", &inotify_fops, group,
O_RDONLY | flags);
if (ret >= 0)
return ret;
fsnotify_put_group(group);
atomic_dec(&user->inotify_devs);
out_free_uid:
free_uid(user);
return ret;
} | SYSCALL_DEFINE1(inotify_init1, int, flags)
{
struct fsnotify_group *group;
struct user_struct *user;
int ret;
BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
return -EINVAL;
user = get_current_user();
if (unlikely(atomic_read(&user->inotify_devs) >=
inotify_max_user_instances)) {
ret = -EMFILE;
goto out_free_uid;
}
group = inotify_new_group(user, inotify_max_queued_events);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_free_uid;
}
atomic_inc(&user->inotify_devs);
ret = anon_inode_getfd("inotify", &inotify_fops, group,
O_RDONLY | flags);
if (ret >= 0)
return ret;
fsnotify_put_group(group);
atomic_dec(&user->inotify_devs);
out_free_uid:
free_uid(user);
return ret;
} | 1,239 |
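inotify_init1 above is a textbook instance of the kernel's goto-unwind error handling: resources are acquired in order, and each failure jumps to a label that releases exactly what was already taken. The same shape in miniature, standalone:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
    char *a, *b;
    int ret = -1;
    a = malloc(64);
    if (!a)
        goto out;
    b = malloc(64);
    if (!b)
        goto out_free_a;
    /* ... use a and b ... */
    ret = 0;
    free(b);
out_free_a:
    free(a);
out:
    return ret;
}

int main(void)
{
    return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}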
1 | int sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
union {
int val;
struct linger ling;
struct timeval tm;
} v;
unsigned int lv = sizeof(int);
int len;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
v.val = 0;
switch(optname) {
case SO_DEBUG:
v.val = sock_flag(sk, SOCK_DBG);
break;
case SO_DONTROUTE:
v.val = sock_flag(sk, SOCK_LOCALROUTE);
break;
case SO_BROADCAST:
v.val = !!sock_flag(sk, SOCK_BROADCAST);
break;
case SO_SNDBUF:
v.val = sk->sk_sndbuf;
break;
case SO_RCVBUF:
v.val = sk->sk_rcvbuf;
break;
case SO_REUSEADDR:
v.val = sk->sk_reuse;
break;
case SO_KEEPALIVE:
v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
break;
case SO_TYPE:
v.val = sk->sk_type;
break;
case SO_ERROR:
v.val = -sock_error(sk);
if (v.val==0)
v.val = xchg(&sk->sk_err_soft, 0);
break;
case SO_OOBINLINE:
v.val = !!sock_flag(sk, SOCK_URGINLINE);
break;
case SO_NO_CHECK:
v.val = sk->sk_no_check;
break;
case SO_PRIORITY:
v.val = sk->sk_priority;
break;
case SO_LINGER:
lv = sizeof(v.ling);
v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
v.ling.l_linger = sk->sk_lingertime / HZ;
break;
case SO_BSDCOMPAT:
sock_warn_obsolete_bsdism("getsockopt");
break;
case SO_TIMESTAMP:
v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
!sock_flag(sk, SOCK_RCVTSTAMPNS);
break;
case SO_TIMESTAMPNS:
v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
break;
case SO_RCVTIMEO:
lv=sizeof(struct timeval);
if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
}
break;
case SO_SNDTIMEO:
lv=sizeof(struct timeval);
if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_sndtimeo / HZ;
v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
}
break;
case SO_RCVLOWAT:
v.val = sk->sk_rcvlowat;
break;
case SO_SNDLOWAT:
v.val=1;
break;
case SO_PASSCRED:
v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
break;
case SO_PEERCRED:
if (len > sizeof(sk->sk_peercred))
len = sizeof(sk->sk_peercred);
if (copy_to_user(optval, &sk->sk_peercred, len))
return -EFAULT;
goto lenout;
case SO_PEERNAME:
{
char address[128];
if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
return -ENOTCONN;
if (lv < len)
return -EINVAL;
if (copy_to_user(optval, address, len))
return -EFAULT;
goto lenout;
}
/* Dubious BSD thing... Probably nobody even uses it, but
* the UNIX standard wants it for whatever reason... -DaveM
*/
case SO_ACCEPTCONN:
v.val = sk->sk_state == TCP_LISTEN;
break;
case SO_PASSSEC:
v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
break;
case SO_PEERSEC:
return security_socket_getpeersec_stream(sock, optval, optlen, len);
case SO_MARK:
v.val = sk->sk_mark;
break;
default:
return -ENOPROTOOPT;
}
if (len > lv)
len = lv;
if (copy_to_user(optval, &v, len))
return -EFAULT;
lenout:
if (put_user(len, optlen))
return -EFAULT;
return 0;
} | int sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
union {
int val;
struct linger ling;
struct timeval tm;
} v;
unsigned int lv = sizeof(int);
int len;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
v.val = 0;
switch(optname) {
case SO_DEBUG:
v.val = sock_flag(sk, SOCK_DBG);
break;
case SO_DONTROUTE:
v.val = sock_flag(sk, SOCK_LOCALROUTE);
break;
case SO_BROADCAST:
v.val = !!sock_flag(sk, SOCK_BROADCAST);
break;
case SO_SNDBUF:
v.val = sk->sk_sndbuf;
break;
case SO_RCVBUF:
v.val = sk->sk_rcvbuf;
break;
case SO_REUSEADDR:
v.val = sk->sk_reuse;
break;
case SO_KEEPALIVE:
v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
break;
case SO_TYPE:
v.val = sk->sk_type;
break;
case SO_ERROR:
v.val = -sock_error(sk);
if (v.val==0)
v.val = xchg(&sk->sk_err_soft, 0);
break;
case SO_OOBINLINE:
v.val = !!sock_flag(sk, SOCK_URGINLINE);
break;
case SO_NO_CHECK:
v.val = sk->sk_no_check;
break;
case SO_PRIORITY:
v.val = sk->sk_priority;
break;
case SO_LINGER:
lv = sizeof(v.ling);
v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
v.ling.l_linger = sk->sk_lingertime / HZ;
break;
case SO_BSDCOMPAT:
sock_warn_obsolete_bsdism("getsockopt");
break;
case SO_TIMESTAMP:
v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
!sock_flag(sk, SOCK_RCVTSTAMPNS);
break;
case SO_TIMESTAMPNS:
v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
break;
case SO_RCVTIMEO:
lv=sizeof(struct timeval);
if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
}
break;
case SO_SNDTIMEO:
lv=sizeof(struct timeval);
if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
v.tm.tv_sec = 0;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_sndtimeo / HZ;
v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
}
break;
case SO_RCVLOWAT:
v.val = sk->sk_rcvlowat;
break;
case SO_SNDLOWAT:
v.val=1;
break;
case SO_PASSCRED:
v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
break;
case SO_PEERCRED:
if (len > sizeof(sk->sk_peercred))
len = sizeof(sk->sk_peercred);
if (copy_to_user(optval, &sk->sk_peercred, len))
return -EFAULT;
goto lenout;
case SO_PEERNAME:
{
char address[128];
if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
return -ENOTCONN;
if (lv < len)
return -EINVAL;
if (copy_to_user(optval, address, len))
return -EFAULT;
goto lenout;
}
case SO_ACCEPTCONN:
v.val = sk->sk_state == TCP_LISTEN;
break;
case SO_PASSSEC:
v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
break;
case SO_PEERSEC:
return security_socket_getpeersec_stream(sock, optval, optlen, len);
case SO_MARK:
v.val = sk->sk_mark;
break;
default:
return -ENOPROTOOPT;
}
if (len > lv)
len = lv;
if (copy_to_user(optval, &v, len))
return -EFAULT;
lenout:
if (put_user(len, optlen))
return -EFAULT;
return 0;
} | 1,242 |
1 | static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *lp = &smc->os;
struct s_skfp_ioctl ioc;
int status = 0;
if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
return -EFAULT;
switch (ioc.cmd) {
case SKFP_GET_STATS: /* Get the driver statistics */
ioc.len = sizeof(lp->MacStat);
status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
? -EFAULT : 0;
break;
case SKFP_CLR_STATS: /* Zero out the driver statistics */
if (!capable(CAP_NET_ADMIN)) {
memset(&lp->MacStat, 0, sizeof(lp->MacStat));
} else {
status = -EPERM;
}
break;
default:
printk("ioctl for %s: unknow cmd: %04x\n", dev->name, ioc.cmd);
status = -EOPNOTSUPP;
} // switch
return status;
} // skfp_ioctl | static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct s_smc *smc = netdev_priv(dev);
skfddi_priv *lp = &smc->os;
struct s_skfp_ioctl ioc;
int status = 0;
if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
return -EFAULT;
switch (ioc.cmd) {
case SKFP_GET_STATS:
ioc.len = sizeof(lp->MacStat);
status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
? -EFAULT : 0;
break;
case SKFP_CLR_STATS:
if (!capable(CAP_NET_ADMIN)) {
memset(&lp->MacStat, 0, sizeof(lp->MacStat));
} else {
status = -EPERM;
}
break;
default:
printk("ioctl for %s: unknow cmd: %04x\n", dev->name, ioc.cmd);
status = -EOPNOTSUPP;
}
return status;
} | 1,244 |
0 | static void get_frame_mb ( const AVFrame * frame , int x , int y , uint8_t mb [ ] , int dim ) {
int i , j , cp ;
for ( cp = 0 ;
cp < 3 ;
cp ++ ) {
int stride = frame -> linesize [ cp ] ;
for ( i = 0 ;
i < dim ;
i ++ ) for ( j = 0 ;
j < dim ;
j ++ ) * mb ++ = frame -> data [ cp ] [ ( y + i ) * stride + x + j ] ;
}
} | static void get_frame_mb ( const AVFrame * frame , int x , int y , uint8_t mb [ ] , int dim ) {
int i , j , cp ;
for ( cp = 0 ;
cp < 3 ;
cp ++ ) {
int stride = frame -> linesize [ cp ] ;
for ( i = 0 ;
i < dim ;
i ++ ) for ( j = 0 ;
j < dim ;
j ++ ) * mb ++ = frame -> data [ cp ] [ ( y + i ) * stride + x + j ] ;
}
} | 1,245 |
1 | void CoreUserInputHandler::handleMsg(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo);
if (!msg.contains(' '))
return;
QString target = msg.section(' ', 0, 0);
QByteArray encMsg = userEncode(target, msg.section(' ', 1));
#ifdef HAVE_QCA2
putPrivmsg(serverEncode(target), encMsg, network()->cipher(target));
#else
putPrivmsg(serverEncode(target), encMsg);
#endif
} | void CoreUserInputHandler::handleMsg(const BufferInfo &bufferInfo, const QString &msg)
{
Q_UNUSED(bufferInfo);
if (!msg.contains(' '))
return;
QString target = msg.section(' ', 0, 0);
QByteArray encMsg = userEncode(target, msg.section(' ', 1));
#ifdef HAVE_QCA2
putPrivmsg(serverEncode(target), encMsg, network()->cipher(target));
#else
putPrivmsg(serverEncode(target), encMsg);
#endif
} | 1,247 |
1 | int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
struct buffer_head *primary = NULL;
struct ext4_group_desc *gdp;
struct inode *inode = NULL;
handle_t *handle;
int gdb_off, gdb_num;
int num_grp_locked = 0;
int err, err2;
gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
ext4_warning(sb, __func__,
"Can't resize non-sparse filesystem further");
return -EPERM;
}
if (ext4_blocks_count(es) + input->blocks_count <
ext4_blocks_count(es)) {
ext4_warning(sb, __func__, "blocks_count overflow");
return -EINVAL;
}
if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
le32_to_cpu(es->s_inodes_count)) {
ext4_warning(sb, __func__, "inodes_count overflow");
return -EINVAL;
}
if (reserved_gdb || gdb_off == 0) {
if (!EXT4_HAS_COMPAT_FEATURE(sb,
EXT4_FEATURE_COMPAT_RESIZE_INODE)
|| !le16_to_cpu(es->s_reserved_gdt_blocks)) {
ext4_warning(sb, __func__,
"No reserved GDT blocks, can't resize");
return -EPERM;
}
inode = ext4_iget(sb, EXT4_RESIZE_INO);
if (IS_ERR(inode)) {
ext4_warning(sb, __func__,
"Error opening resize inode");
return PTR_ERR(inode);
}
}
if ((err = verify_group_input(sb, input)))
goto exit_put;
if ((err = setup_new_group_blocks(sb, input)))
goto exit_put;
/*
* We will always be modifying at least the superblock and a GDT
* block. If we are adding a group past the last current GDT block,
* we will also modify the inode and the dindirect block. If we
* are adding a group with superblock/GDT backups we will also
* modify each of the reserved GDT dindirect blocks.
*/
handle = ext4_journal_start_sb(sb,
ext4_bg_has_super(sb, input->group) ?
3 + reserved_gdb : 4);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto exit_put;
}
lock_super(sb);
if (input->group != sbi->s_groups_count) {
ext4_warning(sb, __func__,
"multiple resizers run on filesystem!");
err = -EBUSY;
goto exit_journal;
}
if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
goto exit_journal;
/*
* We will only either add reserved group blocks to a backup group
* or remove reserved blocks for the first group in a new group block.
* Doing both would be mean more complex code, and sane people don't
* use non-sparse filesystems anymore. This is already checked above.
*/
if (gdb_off) {
primary = sbi->s_group_desc[gdb_num];
if ((err = ext4_journal_get_write_access(handle, primary)))
goto exit_journal;
if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) &&
(err = reserve_backup_gdb(handle, inode, input)))
goto exit_journal;
} else if ((err = add_new_gdb(handle, inode, input, &primary)))
goto exit_journal;
/*
* OK, now we've set up the new group. Time to make it active.
*
* Current kernels don't lock all allocations via lock_super(),
* so we have to be safe wrt. concurrent accesses the group
* data. So we need to be careful to set all of the relevant
* group descriptor data etc. *before* we enable the group.
*
* The key field here is sbi->s_groups_count: as long as
* that retains its old value, nobody is going to access the new
* group.
*
* So first we update all the descriptor metadata for the new
* group; then we update the total disk blocks count; then we
* update the groups count to enable the group; then finally we
* update the free space counts so that the system can start
* using the new disk blocks.
*/
num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, input->group);
/* Update group descriptor block for new group */
gdp = (struct ext4_group_desc *)((char *)primary->b_data +
gdb_off * EXT4_DESC_SIZE(sb));
ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
ext4_free_blks_set(sb, gdp, input->free_blocks_count);
ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
/*
* We can allocate memory for mb_alloc based on the new group
* descriptor
*/
err = ext4_mb_add_groupinfo(sb, input->group, gdp);
if (err) {
ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
goto exit_journal;
}
/*
* Make the new blocks and inodes valid next. We do this before
* increasing the group count so that once the group is enabled,
* all of its blocks and inodes are already valid.
*
* We always allocate group-by-group, then block-by-block or
* inode-by-inode within a group, so enabling these
* blocks/inodes before the group is live won't actually let us
* allocate the new space yet.
*/
ext4_blocks_count_set(es, ext4_blocks_count(es) +
input->blocks_count);
le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb));
/*
* We need to protect s_groups_count against other CPUs seeing
* inconsistent state in the superblock.
*
* The precise rules we use are:
*
* * Writers of s_groups_count *must* hold lock_super
* AND
* * Writers must perform a smp_wmb() after updating all dependent
* data and before modifying the groups count
*
* * Readers must hold lock_super() over the access
* OR
* * Readers must perform an smp_rmb() after reading the groups count
* and before reading any dependent data.
*
* NB. These rules can be relaxed when checking the group count
* while freeing data, as we can only allocate from a block
* group after serialising against the group count, and we can
* only then free after serialising in turn against that
* allocation.
*/
smp_wmb();
/* Update the global fs size fields */
sbi->s_groups_count++;
ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
ext4_handle_dirty_metadata(handle, NULL, primary);
/* Update the reserved block counts only once the new group is
* active. */
ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
input->reserved_blocks);
/* Update the free space counts */
percpu_counter_add(&sbi->s_freeblocks_counter,
input->free_blocks_count);
percpu_counter_add(&sbi->s_freeinodes_counter,
EXT4_INODES_PER_GROUP(sb));
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
ext4_group_t flex_group;
flex_group = ext4_flex_group(sbi, input->group);
sbi->s_flex_groups[flex_group].free_blocks +=
input->free_blocks_count;
sbi->s_flex_groups[flex_group].free_inodes +=
EXT4_INODES_PER_GROUP(sb);
}
ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
sb->s_dirt = 1;
exit_journal:
unlock_super(sb);
if ((err2 = ext4_journal_stop(handle)) && !err)
err = err2;
if (!err) {
update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
sizeof(struct ext4_super_block));
update_backups(sb, primary->b_blocknr, primary->b_data,
primary->b_size);
}
exit_put:
iput(inode);
return err;
} /* ext4_group_add */ | int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
struct buffer_head *primary = NULL;
struct ext4_group_desc *gdp;
struct inode *inode = NULL;
handle_t *handle;
int gdb_off, gdb_num;
int num_grp_locked = 0;
int err, err2;
gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
ext4_warning(sb, __func__,
"Can't resize non-sparse filesystem further");
return -EPERM;
}
if (ext4_blocks_count(es) + input->blocks_count <
ext4_blocks_count(es)) {
ext4_warning(sb, __func__, "blocks_count overflow");
return -EINVAL;
}
if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
le32_to_cpu(es->s_inodes_count)) {
ext4_warning(sb, __func__, "inodes_count overflow");
return -EINVAL;
}
if (reserved_gdb || gdb_off == 0) {
if (!EXT4_HAS_COMPAT_FEATURE(sb,
EXT4_FEATURE_COMPAT_RESIZE_INODE)
|| !le16_to_cpu(es->s_reserved_gdt_blocks)) {
ext4_warning(sb, __func__,
"No reserved GDT blocks, can't resize");
return -EPERM;
}
inode = ext4_iget(sb, EXT4_RESIZE_INO);
if (IS_ERR(inode)) {
ext4_warning(sb, __func__,
"Error opening resize inode");
return PTR_ERR(inode);
}
}
if ((err = verify_group_input(sb, input)))
goto exit_put;
if ((err = setup_new_group_blocks(sb, input)))
goto exit_put;
handle = ext4_journal_start_sb(sb,
ext4_bg_has_super(sb, input->group) ?
3 + reserved_gdb : 4);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto exit_put;
}
lock_super(sb);
if (input->group != sbi->s_groups_count) {
ext4_warning(sb, __func__,
"multiple resizers run on filesystem!");
err = -EBUSY;
goto exit_journal;
}
if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
goto exit_journal;
if (gdb_off) {
primary = sbi->s_group_desc[gdb_num];
if ((err = ext4_journal_get_write_access(handle, primary)))
goto exit_journal;
if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) &&
(err = reserve_backup_gdb(handle, inode, input)))
goto exit_journal;
} else if ((err = add_new_gdb(handle, inode, input, &primary)))
goto exit_journal;
num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, input->group);
gdp = (struct ext4_group_desc *)((char *)primary->b_data +
gdb_off * EXT4_DESC_SIZE(sb));
ext4_block_bitmap_set(sb, gdp, input->block_bitmap);
ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap);
ext4_inode_table_set(sb, gdp, input->inode_table);
ext4_free_blks_set(sb, gdp, input->free_blocks_count);
ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
err = ext4_mb_add_groupinfo(sb, input->group, gdp);
if (err) {
ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
goto exit_journal;
}
ext4_blocks_count_set(es, ext4_blocks_count(es) +
input->blocks_count);
le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb));
smp_wmb();
sbi->s_groups_count++;
ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked);
ext4_handle_dirty_metadata(handle, NULL, primary);
ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
input->reserved_blocks);
percpu_counter_add(&sbi->s_freeblocks_counter,
input->free_blocks_count);
percpu_counter_add(&sbi->s_freeinodes_counter,
EXT4_INODES_PER_GROUP(sb));
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
ext4_group_t flex_group;
flex_group = ext4_flex_group(sbi, input->group);
sbi->s_flex_groups[flex_group].free_blocks +=
input->free_blocks_count;
sbi->s_flex_groups[flex_group].free_inodes +=
EXT4_INODES_PER_GROUP(sb);
}
ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
sb->s_dirt = 1;
exit_journal:
unlock_super(sb);
if ((err2 = ext4_journal_stop(handle)) && !err)
err = err2;
if (!err) {
update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
sizeof(struct ext4_super_block));
update_backups(sb, primary->b_blocknr, primary->b_data,
primary->b_size);
}
exit_put:
iput(inode);
return err;
} | 1,248 |
1 | static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
struct inode *inode, struct buffer_head *bh)
{
struct inode *dir = dentry->d_parent->d_inode;
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct buffer_head *bh2;
struct dx_root *root;
struct dx_frame frames[2], *frame;
struct dx_entry *entries;
struct ext4_dir_entry_2 *de, *de2;
char *data1, *top;
unsigned len;
int retval;
unsigned blocksize;
struct dx_hash_info hinfo;
ext4_lblk_t block;
struct fake_dirent *fde;
blocksize = dir->i_sb->s_blocksize;
dxtrace(printk(KERN_DEBUG "Creating index\n"));
retval = ext4_journal_get_write_access(handle, bh);
if (retval) {
ext4_std_error(dir->i_sb, retval);
brelse(bh);
return retval;
}
root = (struct dx_root *) bh->b_data;
bh2 = ext4_append(handle, dir, &block, &retval);
if (!(bh2)) {
brelse(bh);
return retval;
}
EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
data1 = bh2->b_data;
/* The 0th block becomes the root, move the dirents out */
fde = &root->dotdot;
de = (struct ext4_dir_entry_2 *)((char *)fde +
ext4_rec_len_from_disk(fde->rec_len));
len = ((char *) root) + blocksize - (char *) de;
memcpy (data1, de, len);
de = (struct ext4_dir_entry_2 *) data1;
top = data1 + len;
while ((char *)(de2 = ext4_next_entry(de)) < top)
de = de2;
de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de);
/* Initialize the root; the dot dirents already exist */
de = (struct ext4_dir_entry_2 *) (&root->dotdot);
de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2));
memset (&root->info, 0, sizeof(root->info));
root->info.info_length = sizeof(root->info);
root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
entries = root->entries;
dx_set_block(entries, 1);
dx_set_count(entries, 1);
dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
/* Initialize as for dx_probe */
hinfo.hash_version = root->info.hash_version;
if (hinfo.hash_version <= DX_HASH_TEA)
hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
ext4fs_dirhash(name, namelen, &hinfo);
frame = frames;
frame->entries = entries;
frame->at = entries;
frame->bh = bh;
bh = bh2;
de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
dx_release (frames);
if (!(de))
return retval;
return add_dirent_to_buf(handle, dentry, inode, de, bh);
} | static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
struct inode *inode, struct buffer_head *bh)
{
struct inode *dir = dentry->d_parent->d_inode;
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct buffer_head *bh2;
struct dx_root *root;
struct dx_frame frames[2], *frame;
struct dx_entry *entries;
struct ext4_dir_entry_2 *de, *de2;
char *data1, *top;
unsigned len;
int retval;
unsigned blocksize;
struct dx_hash_info hinfo;
ext4_lblk_t block;
struct fake_dirent *fde;
blocksize = dir->i_sb->s_blocksize;
dxtrace(printk(KERN_DEBUG "Creating index\n"));
retval = ext4_journal_get_write_access(handle, bh);
if (retval) {
ext4_std_error(dir->i_sb, retval);
brelse(bh);
return retval;
}
root = (struct dx_root *) bh->b_data;
bh2 = ext4_append(handle, dir, &block, &retval);
if (!(bh2)) {
brelse(bh);
return retval;
}
EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
data1 = bh2->b_data;
fde = &root->dotdot;
de = (struct ext4_dir_entry_2 *)((char *)fde +
ext4_rec_len_from_disk(fde->rec_len));
len = ((char *) root) + blocksize - (char *) de;
memcpy (data1, de, len);
de = (struct ext4_dir_entry_2 *) data1;
top = data1 + len;
while ((char *)(de2 = ext4_next_entry(de)) < top)
de = de2;
de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de);
de = (struct ext4_dir_entry_2 *) (&root->dotdot);
de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2));
memset (&root->info, 0, sizeof(root->info));
root->info.info_length = sizeof(root->info);
root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
entries = root->entries;
dx_set_block(entries, 1);
dx_set_count(entries, 1);
dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info)));
hinfo.hash_version = root->info.hash_version;
if (hinfo.hash_version <= DX_HASH_TEA)
hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
ext4fs_dirhash(name, namelen, &hinfo);
frame = frames;
frame->entries = entries;
frame->at = entries;
frame->bh = bh;
bh = bh2;
de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
dx_release (frames);
if (!(de))
return retval;
return add_dirent_to_buf(handle, dentry, inode, de, bh);
} | 1,250 |
0 | bool tcg_enabled ( void ) {
return tcg_ctx . code_gen_buffer != NULL ;
} | bool tcg_enabled ( void ) {
return tcg_ctx . code_gen_buffer != NULL ;
} | 1,251 |
1 | void qmp_block_commit(const char *device, bool has_base, const char *base, const char *top, bool has_speed, int64_t speed, Error **errp) { BlockDriverState *bs; BlockDriverState *base_bs, *top_bs; Error *local_err = NULL; /* This will be part of the QMP command, if/when the * BlockdevOnError change for blkmirror makes it in */ BlockdevOnError on_error = BLOCKDEV_ON_ERROR_REPORT; /* drain all i/o before commits */ bdrv_drain_all(); bs = bdrv_find(device); if (!bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } if (base && has_base) { base_bs = bdrv_find_backing_image(bs, base); } else { base_bs = bdrv_find_base(bs); } if (base_bs == NULL) { error_set(errp, QERR_BASE_NOT_FOUND, base ? base : "NULL"); return; } /* default top_bs is the active layer */ top_bs = bs; if (top) { if (strcmp(bs->filename, top) != 0) { top_bs = bdrv_find_backing_image(bs, top); } } if (top_bs == NULL) { error_setg(errp, "Top image file %s not found", top ? top : "NULL"); return; } commit_start(bs, base_bs, top_bs, speed, on_error, block_job_cb, bs, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); return; } /* Grab a reference so hotplug does not delete the BlockDriverState from * underneath us. */ drive_get_ref(drive_get_by_blockdev(bs)); } | void qmp_block_commit(const char *device, bool has_base, const char *base, const char *top, bool has_speed, int64_t speed, Error **errp) { BlockDriverState *bs; BlockDriverState *base_bs, *top_bs; Error *local_err = NULL; BlockdevOnError on_error = BLOCKDEV_ON_ERROR_REPORT; bdrv_drain_all(); bs = bdrv_find(device); if (!bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } if (base && has_base) { base_bs = bdrv_find_backing_image(bs, base); } else { base_bs = bdrv_find_base(bs); } if (base_bs == NULL) { error_set(errp, QERR_BASE_NOT_FOUND, base ? base : "NULL"); return; } top_bs = bs; if (top) { if (strcmp(bs->filename, top) != 0) { top_bs = bdrv_find_backing_image(bs, top); } } if (top_bs == NULL) { error_setg(errp, "Top image file %s not found", top ? top : "NULL"); return; } commit_start(bs, base_bs, top_bs, speed, on_error, block_job_cb, bs, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); return; } drive_get_ref(drive_get_by_blockdev(bs)); } | 1,252 |
1 | static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
const struct sched_class *class;
if (p->sched_class == rq->curr->sched_class) {
rq->curr->sched_class->check_preempt_curr(rq, p, flags);
} else {
for_each_class(class) {
if (class == rq->curr->sched_class)
break;
if (class == p->sched_class) {
resched_task(rq->curr);
break;
}
}
}
/*
* A queue event has occurred, and we're going to schedule. In
* this case, we can save a useless back to back clock update.
*/
if (test_tsk_need_resched(rq->curr))
rq->skip_clock_update = 1;
} | static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
const struct sched_class *class;
if (p->sched_class == rq->curr->sched_class) {
rq->curr->sched_class->check_preempt_curr(rq, p, flags);
} else {
for_each_class(class) {
if (class == rq->curr->sched_class)
break;
if (class == p->sched_class) {
resched_task(rq->curr);
break;
}
}
}
if (test_tsk_need_resched(rq->curr))
rq->skip_clock_update = 1;
} | 1,253 |
1 | static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
if (prev->se.on_rq)
update_rq_clock(rq);
rq->skip_clock_update = 0;
prev->sched_class->put_prev_task(rq, prev);
} | static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
if (prev->se.on_rq)
update_rq_clock(rq);
rq->skip_clock_update = 0;
prev->sched_class->put_prev_task(rq, prev);
} | 1,254 |
0 | static void nameserver_probe_failed ( struct nameserver * const ns ) {
const struct timeval * timeout ;
( void ) evtimer_del ( & ns -> timeout_event ) ;
if ( ns -> state == 1 ) {
return ;
}
timeout = & global_nameserver_timeouts [ MIN ( ns -> failed_times , global_nameserver_timeouts_length - 1 ) ] ;
ns -> failed_times ++ ;
if ( evtimer_add ( & ns -> timeout_event , ( struct timeval * ) timeout ) < 0 ) {
log ( EVDNS_LOG_WARN , "Error from libevent when adding timer event for %s" , debug_ntoa ( ns -> address ) ) ;
}
} | static void nameserver_probe_failed ( struct nameserver * const ns ) {
const struct timeval * timeout ;
( void ) evtimer_del ( & ns -> timeout_event ) ;
if ( ns -> state == 1 ) {
return ;
}
timeout = & global_nameserver_timeouts [ MIN ( ns -> failed_times , global_nameserver_timeouts_length - 1 ) ] ;
ns -> failed_times ++ ;
if ( evtimer_add ( & ns -> timeout_event , ( struct timeval * ) timeout ) < 0 ) {
log ( EVDNS_LOG_WARN , "Error from libevent when adding timer event for %s" , debug_ntoa ( ns -> address ) ) ;
}
} | 1,255 |
1 | void CoreUserInputHandler::handleSay(const BufferInfo &bufferInfo, const QString &msg)
{
if (bufferInfo.bufferName().isEmpty() || !bufferInfo.acceptsRegularMessages())
return; // server buffer
QByteArray encMsg = channelEncode(bufferInfo.bufferName(), msg);
#ifdef HAVE_QCA2
putPrivmsg(serverEncode(bufferInfo.bufferName()), encMsg, network()->cipher(bufferInfo.bufferName()));
#else
putPrivmsg(serverEncode(bufferInfo.bufferName()), encMsg);
#endif
emit displayMsg(Message::Plain, bufferInfo.type(), bufferInfo.bufferName(), msg, network()->myNick(), Message::Self);
} | void CoreUserInputHandler::handleSay(const BufferInfo &bufferInfo, const QString &msg)
{
if (bufferInfo.bufferName().isEmpty() || !bufferInfo.acceptsRegularMessages())
return;
QByteArray encMsg = channelEncode(bufferInfo.bufferName(), msg);
#ifdef HAVE_QCA2
putPrivmsg(serverEncode(bufferInfo.bufferName()), encMsg, network()->cipher(bufferInfo.bufferName()));
#else
putPrivmsg(serverEncode(bufferInfo.bufferName()), encMsg);
#endif
emit displayMsg(Message::Plain, bufferInfo.type(), bufferInfo.bufferName(), msg, network()->myNick(), Message::Self);
} | 1,256 |
0 | static int mkv_check_tag(AVDictionary *m) { AVDictionaryEntry *t = NULL; while ((t = av_dict_get(m, "", t, AV_DICT_IGNORE_SUFFIX))) if (av_strcasecmp(t->key, "title") && av_strcasecmp(t->key, "stereo_mode")) return 1; return 0; } | static int mkv_check_tag(AVDictionary *m) { AVDictionaryEntry *t = NULL; while ((t = av_dict_get(m, "", t, AV_DICT_IGNORE_SUFFIX))) if (av_strcasecmp(t->key, "title") && av_strcasecmp(t->key, "stereo_mode")) return 1; return 0; } | 1,257 |
1 | static int ext4_block_to_path(struct inode *inode,
ext4_lblk_t i_block,
ext4_lblk_t offsets[4], int *boundary)
{
int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
const long direct_blocks = EXT4_NDIR_BLOCKS,
indirect_blocks = ptrs,
double_blocks = (1 << (ptrs_bits * 2));
int n = 0;
int final = 0;
if (i_block < 0) {
ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
} else if (i_block < direct_blocks) {
offsets[n++] = i_block;
final = direct_blocks;
} else if ((i_block -= direct_blocks) < indirect_blocks) {
offsets[n++] = EXT4_IND_BLOCK;
offsets[n++] = i_block;
final = ptrs;
} else if ((i_block -= indirect_blocks) < double_blocks) {
offsets[n++] = EXT4_DIND_BLOCK;
offsets[n++] = i_block >> ptrs_bits;
offsets[n++] = i_block & (ptrs - 1);
final = ptrs;
} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
offsets[n++] = EXT4_TIND_BLOCK;
offsets[n++] = i_block >> (ptrs_bits * 2);
offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
offsets[n++] = i_block & (ptrs - 1);
final = ptrs;
} else {
ext4_warning(inode->i_sb, "ext4_block_to_path",
"block %lu > max",
i_block + direct_blocks +
indirect_blocks + double_blocks);
}
if (boundary)
*boundary = final - 1 - (i_block & (ptrs - 1));
return n;
} | static int ext4_block_to_path(struct inode *inode,
ext4_lblk_t i_block,
ext4_lblk_t offsets[4], int *boundary)
{
int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
const long direct_blocks = EXT4_NDIR_BLOCKS,
indirect_blocks = ptrs,
double_blocks = (1 << (ptrs_bits * 2));
int n = 0;
int final = 0;
if (i_block < 0) {
ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
} else if (i_block < direct_blocks) {
offsets[n++] = i_block;
final = direct_blocks;
} else if ((i_block -= direct_blocks) < indirect_blocks) {
offsets[n++] = EXT4_IND_BLOCK;
offsets[n++] = i_block;
final = ptrs;
} else if ((i_block -= indirect_blocks) < double_blocks) {
offsets[n++] = EXT4_DIND_BLOCK;
offsets[n++] = i_block >> ptrs_bits;
offsets[n++] = i_block & (ptrs - 1);
final = ptrs;
} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
offsets[n++] = EXT4_TIND_BLOCK;
offsets[n++] = i_block >> (ptrs_bits * 2);
offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
offsets[n++] = i_block & (ptrs - 1);
final = ptrs;
} else {
ext4_warning(inode->i_sb, "ext4_block_to_path",
"block %lu > max",
i_block + direct_blocks +
indirect_blocks + double_blocks);
}
if (boundary)
*boundary = final - 1 - (i_block & (ptrs - 1));
return n;
} | 1,258 |
1 | asmlinkage void __sched schedule(void)
{
struct task_struct *prev, *next;
unsigned long *switch_count;
struct rq *rq;
int cpu;
need_resched:
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
rcu_note_context_switch(cpu);
prev = rq->curr;
release_kernel_lock(prev);
need_resched_nonpreemptible:
schedule_debug(prev);
if (sched_feat(HRTICK))
hrtick_clear(rq);
raw_spin_lock_irq(&rq->lock);
clear_tsk_need_resched(prev);
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
prev->state = TASK_RUNNING;
} else {
/*
* If a worker is going to sleep, notify and
* ask workqueue whether it wants to wake up a
* task to maintain concurrency. If so, wake
* up the task.
*/
if (prev->flags & PF_WQ_WORKER) {
struct task_struct *to_wakeup;
to_wakeup = wq_worker_sleeping(prev, cpu);
if (to_wakeup)
try_to_wake_up_local(to_wakeup);
}
deactivate_task(rq, prev, DEQUEUE_SLEEP);
}
switch_count = &prev->nvcsw;
}
pre_schedule(rq, prev);
if (unlikely(!rq->nr_running))
idle_balance(cpu, rq);
put_prev_task(rq, prev);
next = pick_next_task(rq);
if (likely(prev != next)) {
sched_info_switch(prev, next);
perf_event_task_sched_out(prev, next);
rq->nr_switches++;
rq->curr = next;
++*switch_count;
context_switch(rq, prev, next); /* unlocks the rq */
/*
* The context switch have flipped the stack from under us
* and restored the local variables which were saved when
* this task called schedule() in the past. prev == current
* is still correct, but it can be moved to another cpu/rq.
*/
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else
raw_spin_unlock_irq(&rq->lock);
post_schedule(rq);
if (unlikely(reacquire_kernel_lock(prev)))
goto need_resched_nonpreemptible;
preempt_enable_no_resched();
if (need_resched())
goto need_resched;
} | asmlinkage void __sched schedule(void)
{
struct task_struct *prev, *next;
unsigned long *switch_count;
struct rq *rq;
int cpu;
need_resched:
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
rcu_note_context_switch(cpu);
prev = rq->curr;
release_kernel_lock(prev);
need_resched_nonpreemptible:
schedule_debug(prev);
if (sched_feat(HRTICK))
hrtick_clear(rq);
raw_spin_lock_irq(&rq->lock);
clear_tsk_need_resched(prev);
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
prev->state = TASK_RUNNING;
} else {
if (prev->flags & PF_WQ_WORKER) {
struct task_struct *to_wakeup;
to_wakeup = wq_worker_sleeping(prev, cpu);
if (to_wakeup)
try_to_wake_up_local(to_wakeup);
}
deactivate_task(rq, prev, DEQUEUE_SLEEP);
}
switch_count = &prev->nvcsw;
}
pre_schedule(rq, prev);
if (unlikely(!rq->nr_running))
idle_balance(cpu, rq);
put_prev_task(rq, prev);
next = pick_next_task(rq);
if (likely(prev != next)) {
sched_info_switch(prev, next);
perf_event_task_sched_out(prev, next);
rq->nr_switches++;
rq->curr = next;
++*switch_count;
context_switch(rq, prev, next);
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else
raw_spin_unlock_irq(&rq->lock);
post_schedule(rq);
if (unlikely(reacquire_kernel_lock(prev)))
goto need_resched_nonpreemptible;
preempt_enable_no_resched();
if (need_resched())
goto need_resched;
} | 1,259 |
0 | static void prplcb_privacy_deny_removed ( PurpleAccount * account , const char * name ) {
struct im_connection * ic = purple_ic_by_pa ( account ) ;
void * n ;
n = g_slist_find_custom ( ic -> deny , name , ( GCompareFunc ) ic -> acc -> prpl -> handle_cmp ) ;
ic -> deny = g_slist_remove ( ic -> deny , n ) ;
} | static void prplcb_privacy_deny_removed ( PurpleAccount * account , const char * name ) {
struct im_connection * ic = purple_ic_by_pa ( account ) ;
void * n ;
n = g_slist_find_custom ( ic -> deny , name , ( GCompareFunc ) ic -> acc -> prpl -> handle_cmp ) ;
ic -> deny = g_slist_remove ( ic -> deny , n ) ;
} | 1,260 |
0 | int ff_h2645_extract_rbsp(const uint8_t *src, int length, H2645NAL *nal) { int i, si, di; uint8_t *dst; nal->skipped_bytes = 0; #define STARTCODE_TEST \ if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \ if (src[i + 2] != 3 && src[i + 2] != 0) { \ /* startcode, so we must be past the end */ \ length = i; \ } \ break; \ } #if HAVE_FAST_UNALIGNED #define FIND_FIRST_ZERO \ if (i > 0 && !src[i]) \ i--; \ while (src[i]) \ i++ #if HAVE_FAST_64BIT for (i = 0; i + 1 < length; i += 9) { if (!((~AV_RN64A(src + i) & (AV_RN64A(src + i) - 0x0100010001000101ULL)) & 0x8000800080008080ULL)) continue; FIND_FIRST_ZERO; STARTCODE_TEST; i -= 7; } #else for (i = 0; i + 1 < length; i += 5) { if (!((~AV_RN32A(src + i) & (AV_RN32A(src + i) - 0x01000101U)) & 0x80008080U)) continue; FIND_FIRST_ZERO; STARTCODE_TEST; i -= 3; } #endif /* HAVE_FAST_64BIT */ #else for (i = 0; i + 1 < length; i += 2) { if (src[i]) continue; if (i > 0 && src[i - 1] == 0) i--; STARTCODE_TEST; } #endif /* HAVE_FAST_UNALIGNED */ if (i >= length - 1) { // no escaped 0 nal->data = nal->raw_data = src; nal->size = nal->raw_size = length; return length; } av_fast_malloc(&nal->rbsp_buffer, &nal->rbsp_buffer_size, length + AV_INPUT_BUFFER_PADDING_SIZE); if (!nal->rbsp_buffer) return AVERROR(ENOMEM); dst = nal->rbsp_buffer; memcpy(dst, src, i); si = di = i; while (si + 2 < length) { // remove escapes (very rare 1:2^22) if (src[si + 2] > 3) { dst[di++] = src[si++]; dst[di++] = src[si++]; } else if (src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) { if (src[si + 2] == 3) { // escape dst[di++] = 0; dst[di++] = 0; si += 3; if (nal->skipped_bytes_pos) { nal->skipped_bytes++; if (nal->skipped_bytes_pos_size < nal->skipped_bytes) { nal->skipped_bytes_pos_size *= 2; av_assert0(nal->skipped_bytes_pos_size >= nal->skipped_bytes); av_reallocp_array(&nal->skipped_bytes_pos, nal->skipped_bytes_pos_size, sizeof(*nal->skipped_bytes_pos)); if (!nal->skipped_bytes_pos) { nal->skipped_bytes_pos_size = 0; return AVERROR(ENOMEM); } } if (nal->skipped_bytes_pos) nal->skipped_bytes_pos[nal->skipped_bytes-1] = di - 1; } continue; } else // next start code goto nsc; } dst[di++] = src[si++]; } while (si < length) dst[di++] = src[si++]; nsc: memset(dst + di, 0, AV_INPUT_BUFFER_PADDING_SIZE); nal->data = dst; nal->size = di; nal->raw_data = src; nal->raw_size = si; return si; } | int ff_h2645_extract_rbsp(const uint8_t *src, int length, H2645NAL *nal) { int i, si, di; uint8_t *dst; nal->skipped_bytes = 0; #define STARTCODE_TEST \ if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \ if (src[i + 2] != 3 && src[i + 2] != 0) { \ \ length = i; \ } \ break; \ } #if HAVE_FAST_UNALIGNED #define FIND_FIRST_ZERO \ if (i > 0 && !src[i]) \ i--; \ while (src[i]) \ i++ #if HAVE_FAST_64BIT for (i = 0; i + 1 < length; i += 9) { if (!((~AV_RN64A(src + i) & (AV_RN64A(src + i) - 0x0100010001000101ULL)) & 0x8000800080008080ULL)) continue; FIND_FIRST_ZERO; STARTCODE_TEST; i -= 7; } #else for (i = 0; i + 1 < length; i += 5) { if (!((~AV_RN32A(src + i) & (AV_RN32A(src + i) - 0x01000101U)) & 0x80008080U)) continue; FIND_FIRST_ZERO; STARTCODE_TEST; i -= 3; } #endif #else for (i = 0; i + 1 < length; i += 2) { if (src[i]) continue; if (i > 0 && src[i - 1] == 0) i--; STARTCODE_TEST; } #endif if (i >= length - 1) { nal->data = nal->raw_data = src; nal->size = nal->raw_size = length; return length; } av_fast_malloc(&nal->rbsp_buffer, &nal->rbsp_buffer_size, length + AV_INPUT_BUFFER_PADDING_SIZE); if (!nal->rbsp_buffer) return AVERROR(ENOMEM); dst = nal->rbsp_buffer; memcpy(dst, src, i); si = di = i; while (si + 2 < length) { if (src[si + 2] > 3) { dst[di++] = src[si++]; dst[di++] = src[si++]; } else if (src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) { if (src[si + 2] == 3) { dst[di++] = 0; dst[di++] = 0; si += 3; if (nal->skipped_bytes_pos) { nal->skipped_bytes++; if (nal->skipped_bytes_pos_size < nal->skipped_bytes) { nal->skipped_bytes_pos_size *= 2; av_assert0(nal->skipped_bytes_pos_size >= nal->skipped_bytes); av_reallocp_array(&nal->skipped_bytes_pos, nal->skipped_bytes_pos_size, sizeof(*nal->skipped_bytes_pos)); if (!nal->skipped_bytes_pos) { nal->skipped_bytes_pos_size = 0; return AVERROR(ENOMEM); } } if (nal->skipped_bytes_pos) nal->skipped_bytes_pos[nal->skipped_bytes-1] = di - 1; } continue; } else goto nsc; } dst[di++] = src[si++]; } while (si < length) dst[di++] = src[si++]; nsc: memset(dst + di, 0, AV_INPUT_BUFFER_PADDING_SIZE); nal->data = dst; nal->size = di; nal->raw_data = src; nal->raw_size = si; return si; } | 1,261
1 |
static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
{
return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
le32_to_cpu(raw_inode->i_size_lo); } |
static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
{
return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
le32_to_cpu(raw_inode->i_size_lo); } | 1,262
0 | static int rds_notify_cong ( struct rds_sock * rs , struct msghdr * msghdr ) {
uint64_t notify = rs -> rs_cong_notify ;
unsigned long flags ;
int err ;
err = put_cmsg ( msghdr , SOL_RDS , RDS_CMSG_CONG_UPDATE , sizeof ( notify ) , & notify ) ;
if ( err ) return err ;
spin_lock_irqsave ( & rs -> rs_lock , flags ) ;
rs -> rs_cong_notify &= ~ notify ;
spin_unlock_irqrestore ( & rs -> rs_lock , flags ) ;
return 0 ;
} | static int rds_notify_cong ( struct rds_sock * rs , struct msghdr * msghdr ) {
uint64_t notify = rs -> rs_cong_notify ;
unsigned long flags ;
int err ;
err = put_cmsg ( msghdr , SOL_RDS , RDS_CMSG_CONG_UPDATE , sizeof ( notify ) , & notify ) ;
if ( err ) return err ;
spin_lock_irqsave ( & rs -> rs_lock , flags ) ;
rs -> rs_cong_notify &= ~ notify ;
spin_unlock_irqrestore ( & rs -> rs_lock , flags ) ;
return 0 ;
} | 1,263 |
1 | void CoreUserInputHandler::putPrivmsg(const QByteArray &target, const QByteArray &message, Cipher *cipher)
{
// Encrypted messages need special care. There's no clear relation between cleartext and encrypted message length,
// so we can't just compute the maxSplitPos. Instead, we need to loop through the splitpoints until the crypted
// version is short enough...
// TODO: check out how the various possible encryption methods behave length-wise and make
// this clean by predicting the length of the crypted msg.
// For example, blowfish-ebc seems to create 8-char chunks.
static const char *cmd = "PRIVMSG";
static const char *splitter = " .,-!?";
int maxSplitPos = message.count();
int splitPos = maxSplitPos;
forever {
QByteArray crypted = message.left(splitPos);
bool isEncrypted = false;
#ifdef HAVE_QCA2
if (cipher && !cipher->key().isEmpty() && !message.isEmpty()) {
isEncrypted = cipher->encrypt(crypted);
}
#endif
int overrun = lastParamOverrun(cmd, QList<QByteArray>() << target << crypted);
if (overrun) {
// In case this is not an encrypted msg, we can just cut off at the end
if (!isEncrypted)
maxSplitPos = message.count() - overrun;
splitPos = -1;
for (const char *splitChar = splitter; *splitChar != 0; splitChar++) {
splitPos = qMax(splitPos, message.lastIndexOf(*splitChar, maxSplitPos) + 1); // keep split char on old line
}
if (splitPos <= 0 || splitPos > maxSplitPos)
splitPos = maxSplitPos;
maxSplitPos = splitPos - 1;
if (maxSplitPos <= 0) { // this should never happen, but who knows...
qWarning() << tr("[Error] Could not encrypt your message: %1").arg(message.data());
return;
}
continue; // we never come back here for !encrypted!
}
// now we have found a valid splitpos (or didn't need to split to begin with)
putCmd(cmd, QList<QByteArray>() << target << crypted);
if (splitPos < message.count())
putPrivmsg(target, message.mid(splitPos), cipher);
return;
}
} | void CoreUserInputHandler::putPrivmsg(const QByteArray &target, const QByteArray &message, Cipher *cipher)
{
static const char *cmd = "PRIVMSG";
static const char *splitter = " .,-!?";
int maxSplitPos = message.count();
int splitPos = maxSplitPos;
forever {
QByteArray crypted = message.left(splitPos);
bool isEncrypted = false;
#ifdef HAVE_QCA2
if (cipher && !cipher->key().isEmpty() && !message.isEmpty()) {
isEncrypted = cipher->encrypt(crypted);
}
#endif
int overrun = lastParamOverrun(cmd, QList<QByteArray>() << target << crypted);
if (overrun) {
if (!isEncrypted)
maxSplitPos = message.count() - overrun;
splitPos = -1;
for (const char *splitChar = splitter; *splitChar != 0; splitChar++) {
splitPos = qMax(splitPos, message.lastIndexOf(*splitChar, maxSplitPos) + 1);
}
if (splitPos <= 0 || splitPos > maxSplitPos)
splitPos = maxSplitPos;
maxSplitPos = splitPos - 1;
if (maxSplitPos <= 0) {
qWarning() << tr("[Error] Could not encrypt your message: %1").arg(message.data());
return;
}
continue;
}
putCmd(cmd, QList<QByteArray>() << target << crypted);
if (splitPos < message.count())
putPrivmsg(target, message.mid(splitPos), cipher);
return;
}
} | 1,264 |
1 | __acquires(kernel_lock)
{
struct buffer_head *bh;
struct ext4_super_block *es = NULL;
struct ext4_sb_info *sbi;
ext4_fsblk_t block;
ext4_fsblk_t sb_block = get_sb_block(&data);
ext4_fsblk_t logical_sb_block;
unsigned long offset = 0;
unsigned long journal_devnum = 0;
unsigned long def_mount_opts;
struct inode *root;
char *cp;
const char *descr;
int ret = -EINVAL;
int blocksize;
int db_count;
int i;
int needs_recovery, has_huge_files;
int features;
__u64 blocks_count;
int err;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
sbi->s_mount_opt = 0;
sbi->s_resuid = EXT4_DEF_RESUID;
sbi->s_resgid = EXT4_DEF_RESGID;
sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
sbi->s_sb_block = sb_block;
unlock_kernel();
/* Cleanup superblock name */
for (cp = sb->s_id; (cp = strchr(cp, '/'));)
*cp = '!';
blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
if (!blocksize) {
printk(KERN_ERR "EXT4-fs: unable to set blocksize\n");
goto out_fail;
}
/*
* The ext4 superblock will not be buffer aligned for other than 1kB
* block sizes. We need to calculate the offset from buffer start.
*/
if (blocksize != EXT4_MIN_BLOCK_SIZE) {
logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
offset = do_div(logical_sb_block, blocksize);
} else {
logical_sb_block = sb_block;
}
if (!(bh = sb_bread(sb, logical_sb_block))) {
printk(KERN_ERR "EXT4-fs: unable to read superblock\n");
goto out_fail;
}
/*
* Note: s_es must be initialized as soon as possible because
* some ext4 macro-instructions depend on its value
*/
es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
sbi->s_es = es;
sb->s_magic = le16_to_cpu(es->s_magic);
if (sb->s_magic != EXT4_SUPER_MAGIC)
goto cantfind_ext4;
/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
if (def_mount_opts & EXT4_DEFM_DEBUG)
set_opt(sbi->s_mount_opt, DEBUG);
if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
set_opt(sbi->s_mount_opt, GRPID);
if (def_mount_opts & EXT4_DEFM_UID16)
set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT4_FS_XATTR
if (def_mount_opts & EXT4_DEFM_XATTR_USER)
set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
if (def_mount_opts & EXT4_DEFM_ACL)
set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
sbi->s_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
sbi->s_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
sbi->s_mount_opt |= EXT4_MOUNT_WRITEBACK_DATA;
if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
set_opt(sbi->s_mount_opt, ERRORS_CONT);
else
set_opt(sbi->s_mount_opt, ERRORS_RO);
sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
set_opt(sbi->s_mount_opt, RESERVATION);
set_opt(sbi->s_mount_opt, BARRIER);
/*
* turn on extents feature by default in ext4 filesystem
* only if feature flag already set by mkfs or tune2fs.
* Use -o noextents to turn it off
*/
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
set_opt(sbi->s_mount_opt, EXTENTS);
else
ext4_warning(sb, __func__,
"extents feature not enabled on this filesystem, "
"use tune2fs.");
/*
* enable delayed allocation by default
* Use -o nodelalloc to turn it off
*/
set_opt(sbi->s_mount_opt, DELALLOC);
if (!parse_options((char *) data, sb, &journal_devnum,
&journal_ioprio, NULL, 0))
goto failed_mount;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
(EXT4_HAS_COMPAT_FEATURE(sb, ~0U) ||
EXT4_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
EXT4_HAS_INCOMPAT_FEATURE(sb, ~0U)))
printk(KERN_WARNING
"EXT4-fs warning: feature flags set on rev 0 fs, "
"running e2fsck is recommended\n");
/*
* Check feature flags regardless of the revision level, since we
* previously didn't change the revision level when setting the flags,
* so there is a chance incompat flags are set on a rev 0 filesystem.
*/
features = EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP);
if (features) {
printk(KERN_ERR "EXT4-fs: %s: couldn't mount because of "
"unsupported optional features (%x).\n", sb->s_id,
(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
~EXT4_FEATURE_INCOMPAT_SUPP));
goto failed_mount;
}
features = EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP);
if (!(sb->s_flags & MS_RDONLY) && features) {
printk(KERN_ERR "EXT4-fs: %s: couldn't mount RDWR because of "
"unsupported optional features (%x).\n", sb->s_id,
(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
~EXT4_FEATURE_RO_COMPAT_SUPP));
goto failed_mount;
}
has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
if (has_huge_files) {
/*
* Large file size enabled file system can only be
* mount if kernel is build with CONFIG_LBD
*/
if (sizeof(root->i_blocks) < sizeof(u64) &&
!(sb->s_flags & MS_RDONLY)) {
printk(KERN_ERR "EXT4-fs: %s: Filesystem with huge "
"files cannot be mounted read-write "
"without CONFIG_LBD.\n", sb->s_id);
goto failed_mount;
}
}
blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
printk(KERN_ERR
"EXT4-fs: Unsupported filesystem blocksize %d on %s.\n",
blocksize, sb->s_id);
goto failed_mount;
}
if (sb->s_blocksize != blocksize) {
/* Validate the filesystem blocksize */
if (!sb_set_blocksize(sb, blocksize)) {
printk(KERN_ERR "EXT4-fs: bad block size %d.\n",
blocksize);
goto failed_mount;
}
brelse(bh);
logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
offset = do_div(logical_sb_block, blocksize);
bh = sb_bread(sb, logical_sb_block);
if (!bh) {
printk(KERN_ERR
"EXT4-fs: Can't read superblock on 2nd try.\n");
goto failed_mount;
}
es = (struct ext4_super_block *)(((char *)bh->b_data) + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
printk(KERN_ERR
"EXT4-fs: Magic mismatch, very weird !\n");
goto failed_mount;
}
}
sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
has_huge_files);
sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
} else {
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
(!is_power_of_2(sbi->s_inode_size)) ||
(sbi->s_inode_size > blocksize)) {
printk(KERN_ERR
"EXT4-fs: unsupported inode size: %d\n",
sbi->s_inode_size);
goto failed_mount;
}
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
}
sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) {
if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
!is_power_of_2(sbi->s_desc_size)) {
printk(KERN_ERR
"EXT4-fs: unsupported descriptor size %lu\n",
sbi->s_desc_size);
goto failed_mount;
}
} else
sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
goto cantfind_ext4;
sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0)
goto cantfind_ext4;
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
sbi->s_sbh = bh;
sbi->s_mount_state = le16_to_cpu(es->s_state);
sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
for (i = 0; i < 4; i++)
sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
sbi->s_def_hash_version = es->s_def_hash_version;
i = le32_to_cpu(es->s_flags);
if (i & EXT2_FLAGS_UNSIGNED_HASH)
sbi->s_hash_unsigned = 3;
else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
sbi->s_hash_unsigned = 3;
#else
es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
sb->s_dirt = 1;
}
if (sbi->s_blocks_per_group > blocksize * 8) {
printk(KERN_ERR
"EXT4-fs: #blocks per group too big: %lu\n",
sbi->s_blocks_per_group);
goto failed_mount;
}
if (sbi->s_inodes_per_group > blocksize * 8) {
printk(KERN_ERR
"EXT4-fs: #inodes per group too big: %lu\n",
sbi->s_inodes_per_group);
goto failed_mount;
}
if (ext4_blocks_count(es) >
(sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
printk(KERN_ERR "EXT4-fs: filesystem on %s:"
" too large to mount safely\n", sb->s_id);
if (sizeof(sector_t) < 8)
printk(KERN_WARNING "EXT4-fs: CONFIG_LBD not "
"enabled\n");
goto failed_mount;
}
if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext4;
/* ensure blocks_count calculation below doesn't sign-extend */
if (ext4_blocks_count(es) + EXT4_BLOCKS_PER_GROUP(sb) <
le32_to_cpu(es->s_first_data_block) + 1) {
printk(KERN_WARNING "EXT4-fs: bad geometry: block count %llu, "
"first data block %u, blocks per group %lu\n",
ext4_blocks_count(es),
le32_to_cpu(es->s_first_data_block),
EXT4_BLOCKS_PER_GROUP(sb));
goto failed_mount;
}
blocks_count = (ext4_blocks_count(es) -
le32_to_cpu(es->s_first_data_block) +
EXT4_BLOCKS_PER_GROUP(sb) - 1);
do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
sbi->s_groups_count = blocks_count;
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
printk(KERN_ERR "EXT4-fs: not enough memory\n");
goto failed_mount;
}
#ifdef CONFIG_PROC_FS
if (ext4_proc_root)
sbi->s_proc = proc_mkdir(sb->s_id, ext4_proc_root);
if (sbi->s_proc)
proc_create_data("inode_readahead_blks", 0644, sbi->s_proc,
&ext4_ui_proc_fops,
&sbi->s_inode_readahead_blks);
#endif
bgl_lock_init(&sbi->s_blockgroup_lock);
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logical_sb_block, i);
sbi->s_group_desc[i] = sb_bread(sb, block);
if (!sbi->s_group_desc[i]) {
printk(KERN_ERR "EXT4-fs: "
"can't read group descriptor %d\n", i);
db_count = i;
goto failed_mount2;
}
}
if (!ext4_check_descriptors(sb)) {
printk(KERN_ERR "EXT4-fs: group descriptors corrupted!\n");
goto failed_mount2;
}
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
if (!ext4_fill_flex_info(sb)) {
printk(KERN_ERR
"EXT4-fs: unable to initialize "
"flex_bg meta info!\n");
goto failed_mount2;
}
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext4_count_free_blocks(sb));
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext4_count_free_inodes(sb));
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
ext4_count_dirs(sb));
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
}
if (err) {
printk(KERN_ERR "EXT4-fs: insufficient memory\n");
goto failed_mount3;
}
sbi->s_stripe = ext4_get_stripe_size(sbi);
/*
* set up enough so that it can read an inode
*/
sb->s_op = &ext4_sops;
sb->s_export_op = &ext4_export_ops;
sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_QUOTA
sb->s_qcop = &ext4_qctl_operations;
sb->dq_op = &ext4_quota_operations;
#endif
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
sb->s_root = NULL;
needs_recovery = (es->s_last_orphan != 0 ||
EXT4_HAS_INCOMPAT_FEATURE(sb,
EXT4_FEATURE_INCOMPAT_RECOVER));
/*
* The first inode we look at is the journal inode. Don't try
* root first: it may be modified in the journal!
*/
if (!test_opt(sb, NOLOAD) &&
EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
if (ext4_load_journal(sb, es, journal_devnum))
goto failed_mount3;
if (!(sb->s_flags & MS_RDONLY) &&
EXT4_SB(sb)->s_journal->j_failed_commit) {
printk(KERN_CRIT "EXT4-fs error (device %s): "
"ext4_fill_super: Journal transaction "
"%u is corrupt\n", sb->s_id,
EXT4_SB(sb)->s_journal->j_failed_commit);
if (test_opt(sb, ERRORS_RO)) {
printk(KERN_CRIT
"Mounting filesystem read-only\n");
sb->s_flags |= MS_RDONLY;
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
}
if (test_opt(sb, ERRORS_PANIC)) {
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
ext4_commit_super(sb, es, 1);
goto failed_mount4;
}
}
} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
printk(KERN_ERR "EXT4-fs: required journal recovery "
"suppressed and not mounted read-only\n");
goto failed_mount4;
} else {
clear_opt(sbi->s_mount_opt, DATA_FLAGS);
set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
sbi->s_journal = NULL;
needs_recovery = 0;
goto no_journal;
}
if (ext4_blocks_count(es) > 0xffffffffULL &&
!jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_64BIT)) {
printk(KERN_ERR "ext4: Failed to set 64-bit journal feature\n");
goto failed_mount4;
}
if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
jbd2_journal_set_features(sbi->s_journal,
JBD2_FEATURE_COMPAT_CHECKSUM, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
jbd2_journal_set_features(sbi->s_journal,
JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0);
jbd2_journal_clear_features(sbi->s_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
} else {
jbd2_journal_clear_features(sbi->s_journal,
JBD2_FEATURE_COMPAT_CHECKSUM, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
}
/* We have now updated the journal if required, so we can
* validate the data journaling mode. */
switch (test_opt(sb, DATA_FLAGS)) {
case 0:
/* No mode set, assume a default based on the journal
* capabilities: ORDERED_DATA if the journal can
* cope, else JOURNAL_DATA
*/
if (jbd2_journal_check_available_features
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
set_opt(sbi->s_mount_opt, ORDERED_DATA);
else
set_opt(sbi->s_mount_opt, JOURNAL_DATA);
break;
case EXT4_MOUNT_ORDERED_DATA:
case EXT4_MOUNT_WRITEBACK_DATA:
if (!jbd2_journal_check_available_features
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
printk(KERN_ERR "EXT4-fs: Journal does not support "
"requested data journaling mode\n");
goto failed_mount4;
}
default:
break;
}
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
no_journal:
if (test_opt(sb, NOBH)) {
if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
printk(KERN_WARNING "EXT4-fs: Ignoring nobh option - "
"its supported only with writeback mode\n");
clear_opt(sbi->s_mount_opt, NOBH);
}
}
/*
* The jbd2_journal_load will have done any necessary log recovery,
* so we can safely mount the rest of the filesystem now.
*/
root = ext4_iget(sb, EXT4_ROOT_INO);
if (IS_ERR(root)) {
printk(KERN_ERR "EXT4-fs: get root inode failed\n");
ret = PTR_ERR(root);
goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
printk(KERN_ERR "EXT4-fs: corrupt root inode, run e2fsck\n");
goto failed_mount4;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
printk(KERN_ERR "EXT4-fs: get root dentry failed\n");
iput(root);
ret = -ENOMEM;
goto failed_mount4;
}
ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
/* determine the minimum size of new large inodes, if present */
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_want_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_want_extra_isize);
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_min_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_min_extra_isize);
}
}
/* Check if enough inode space is available */
if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
sbi->s_inode_size) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
printk(KERN_INFO "EXT4-fs: required extra inode space not"
"available.\n");
}
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
printk(KERN_WARNING "EXT4-fs: Ignoring delalloc option - "
"requested data journaling mode\n");
clear_opt(sbi->s_mount_opt, DELALLOC);
} else if (test_opt(sb, DELALLOC))
printk(KERN_INFO "EXT4-fs: delayed allocation enabled\n");
ext4_ext_init(sb);
err = ext4_mb_init(sb, needs_recovery);
if (err) {
printk(KERN_ERR "EXT4-fs: failed to initalize mballoc (%d)\n",
err);
goto failed_mount4;
}
/*
* akpm: core read_super() calls in here with the superblock locked.
* That deadlocks, because orphan cleanup needs to lock the superblock
* in numerous places. Here we just pop the lock - it's relatively
* harmless, because we are now ready to accept write_super() requests,
* and aviro says that's the only reason for hanging onto the
* superblock lock.
*/
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
ext4_orphan_cleanup(sb, es);
EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
if (needs_recovery) {
printk(KERN_INFO "EXT4-fs: recovery complete.\n");
ext4_mark_recovery_complete(sb, es);
}
if (EXT4_SB(sb)->s_journal) {
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
descr = " journalled data mode";
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
descr = " ordered data mode";
else
descr = " writeback data mode";
} else
descr = "out journal";
printk(KERN_INFO "EXT4-fs: mounted filesystem %s with%s\n",
sb->s_id, descr);
lock_kernel();
return 0;
cantfind_ext4:
if (!silent)
printk(KERN_ERR "VFS: Can't find ext4 filesystem on dev %s.\n",
sb->s_id);
goto failed_mount;
failed_mount4:
printk(KERN_ERR "EXT4-fs (device %s): mount failed\n", sb->s_id);
if (sbi->s_journal) {
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
}
failed_mount3:
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
kfree(sbi->s_group_desc);
failed_mount:
if (sbi->s_proc) {
remove_proc_entry("inode_readahead_blks", sbi->s_proc);
remove_proc_entry(sb->s_id, ext4_proc_root);
}
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
#endif
ext4_blkdev_remove(sbi);
brelse(bh);
out_fail:
sb->s_fs_info = NULL;
kfree(sbi);
lock_kernel();
return ret;
} | __acquires(kernel_lock)
{
struct buffer_head *bh;
struct ext4_super_block *es = NULL;
struct ext4_sb_info *sbi;
ext4_fsblk_t block;
ext4_fsblk_t sb_block = get_sb_block(&data);
ext4_fsblk_t logical_sb_block;
unsigned long offset = 0;
unsigned long journal_devnum = 0;
unsigned long def_mount_opts;
struct inode *root;
char *cp;
const char *descr;
int ret = -EINVAL;
int blocksize;
int db_count;
int i;
int needs_recovery, has_huge_files;
int features;
__u64 blocks_count;
int err;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
sbi->s_mount_opt = 0;
sbi->s_resuid = EXT4_DEF_RESUID;
sbi->s_resgid = EXT4_DEF_RESGID;
sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
sbi->s_sb_block = sb_block;
unlock_kernel();
for (cp = sb->s_id; (cp = strchr(cp, '/'));)
*cp = '!';
blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
if (!blocksize) {
printk(KERN_ERR "EXT4-fs: unable to set blocksize\n");
goto out_fail;
}
if (blocksize != EXT4_MIN_BLOCK_SIZE) {
logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
offset = do_div(logical_sb_block, blocksize);
} else {
logical_sb_block = sb_block;
}
if (!(bh = sb_bread(sb, logical_sb_block))) {
printk(KERN_ERR "EXT4-fs: unable to read superblock\n");
goto out_fail;
}
es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
sbi->s_es = es;
sb->s_magic = le16_to_cpu(es->s_magic);
if (sb->s_magic != EXT4_SUPER_MAGIC)
goto cantfind_ext4;
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
if (def_mount_opts & EXT4_DEFM_DEBUG)
set_opt(sbi->s_mount_opt, DEBUG);
if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
set_opt(sbi->s_mount_opt, GRPID);
if (def_mount_opts & EXT4_DEFM_UID16)
set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT4_FS_XATTR
if (def_mount_opts & EXT4_DEFM_XATTR_USER)
set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT4_FS_POSIX_ACL
if (def_mount_opts & EXT4_DEFM_ACL)
set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
sbi->s_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
sbi->s_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
sbi->s_mount_opt |= EXT4_MOUNT_WRITEBACK_DATA;
if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
set_opt(sbi->s_mount_opt, ERRORS_CONT);
else
set_opt(sbi->s_mount_opt, ERRORS_RO);
sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
set_opt(sbi->s_mount_opt, RESERVATION);
set_opt(sbi->s_mount_opt, BARRIER);
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
set_opt(sbi->s_mount_opt, EXTENTS);
else
ext4_warning(sb, __func__,
"extents feature not enabled on this filesystem, "
"use tune2fs.");
set_opt(sbi->s_mount_opt, DELALLOC);
if (!parse_options((char *) data, sb, &journal_devnum,
&journal_ioprio, NULL, 0))
goto failed_mount;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
(EXT4_HAS_COMPAT_FEATURE(sb, ~0U) ||
EXT4_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
EXT4_HAS_INCOMPAT_FEATURE(sb, ~0U)))
printk(KERN_WARNING
"EXT4-fs warning: feature flags set on rev 0 fs, "
"running e2fsck is recommended\n");
features = EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP);
if (features) {
printk(KERN_ERR "EXT4-fs: %s: couldn't mount because of "
"unsupported optional features (%x).\n", sb->s_id,
(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
~EXT4_FEATURE_INCOMPAT_SUPP));
goto failed_mount;
}
features = EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP);
if (!(sb->s_flags & MS_RDONLY) && features) {
printk(KERN_ERR "EXT4-fs: %s: couldn't mount RDWR because of "
"unsupported optional features (%x).\n", sb->s_id,
(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
~EXT4_FEATURE_RO_COMPAT_SUPP));
goto failed_mount;
}
has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
if (has_huge_files) {
if (sizeof(root->i_blocks) < sizeof(u64) &&
!(sb->s_flags & MS_RDONLY)) {
printk(KERN_ERR "EXT4-fs: %s: Filesystem with huge "
"files cannot be mounted read-write "
"without CONFIG_LBD.\n", sb->s_id);
goto failed_mount;
}
}
blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
printk(KERN_ERR
"EXT4-fs: Unsupported filesystem blocksize %d on %s.\n",
blocksize, sb->s_id);
goto failed_mount;
}
if (sb->s_blocksize != blocksize) {
if (!sb_set_blocksize(sb, blocksize)) {
printk(KERN_ERR "EXT4-fs: bad block size %d.\n",
blocksize);
goto failed_mount;
}
brelse(bh);
logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
offset = do_div(logical_sb_block, blocksize);
bh = sb_bread(sb, logical_sb_block);
if (!bh) {
printk(KERN_ERR
"EXT4-fs: Can't read superblock on 2nd try.\n");
goto failed_mount;
}
es = (struct ext4_super_block *)(((char *)bh->b_data) + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
printk(KERN_ERR
"EXT4-fs: Magic mismatch, very weird !\n");
goto failed_mount;
}
}
sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
has_huge_files);
sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
} else {
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
(!is_power_of_2(sbi->s_inode_size)) ||
(sbi->s_inode_size > blocksize)) {
printk(KERN_ERR
"EXT4-fs: unsupported inode size: %d\n",
sbi->s_inode_size);
goto failed_mount;
}
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
}
sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) {
if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
!is_power_of_2(sbi->s_desc_size)) {
printk(KERN_ERR
"EXT4-fs: unsupported descriptor size %lu\n",
sbi->s_desc_size);
goto failed_mount;
}
} else
sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
goto cantfind_ext4;
sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0)
goto cantfind_ext4;
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
sbi->s_sbh = bh;
sbi->s_mount_state = le16_to_cpu(es->s_state);
sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
for (i = 0; i < 4; i++)
sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
sbi->s_def_hash_version = es->s_def_hash_version;
i = le32_to_cpu(es->s_flags);
if (i & EXT2_FLAGS_UNSIGNED_HASH)
sbi->s_hash_unsigned = 3;
else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
sbi->s_hash_unsigned = 3;
#else
es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
sb->s_dirt = 1;
}
if (sbi->s_blocks_per_group > blocksize * 8) {
printk(KERN_ERR
"EXT4-fs: #blocks per group too big: %lu\n",
sbi->s_blocks_per_group);
goto failed_mount;
}
if (sbi->s_inodes_per_group > blocksize * 8) {
printk(KERN_ERR
"EXT4-fs: #inodes per group too big: %lu\n",
sbi->s_inodes_per_group);
goto failed_mount;
}
if (ext4_blocks_count(es) >
(sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
printk(KERN_ERR "EXT4-fs: filesystem on %s:"
" too large to mount safely\n", sb->s_id);
if (sizeof(sector_t) < 8)
printk(KERN_WARNING "EXT4-fs: CONFIG_LBD not "
"enabled\n");
goto failed_mount;
}
if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext4;
if (ext4_blocks_count(es) + EXT4_BLOCKS_PER_GROUP(sb) <
le32_to_cpu(es->s_first_data_block) + 1) {
printk(KERN_WARNING "EXT4-fs: bad geometry: block count %llu, "
"first data block %u, blocks per group %lu\n",
ext4_blocks_count(es),
le32_to_cpu(es->s_first_data_block),
EXT4_BLOCKS_PER_GROUP(sb));
goto failed_mount;
}
blocks_count = (ext4_blocks_count(es) -
le32_to_cpu(es->s_first_data_block) +
EXT4_BLOCKS_PER_GROUP(sb) - 1);
do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
sbi->s_groups_count = blocks_count;
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
printk(KERN_ERR "EXT4-fs: not enough memory\n");
goto failed_mount;
}
#ifdef CONFIG_PROC_FS
if (ext4_proc_root)
sbi->s_proc = proc_mkdir(sb->s_id, ext4_proc_root);
if (sbi->s_proc)
proc_create_data("inode_readahead_blks", 0644, sbi->s_proc,
&ext4_ui_proc_fops,
&sbi->s_inode_readahead_blks);
#endif
bgl_lock_init(&sbi->s_blockgroup_lock);
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logical_sb_block, i);
sbi->s_group_desc[i] = sb_bread(sb, block);
if (!sbi->s_group_desc[i]) {
printk(KERN_ERR "EXT4-fs: "
"can't read group descriptor %d\n", i);
db_count = i;
goto failed_mount2;
}
}
if (!ext4_check_descriptors(sb)) {
printk(KERN_ERR "EXT4-fs: group descriptors corrupted!\n");
goto failed_mount2;
}
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
if (!ext4_fill_flex_info(sb)) {
printk(KERN_ERR
"EXT4-fs: unable to initialize "
"flex_bg meta info!\n");
goto failed_mount2;
}
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext4_count_free_blocks(sb));
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext4_count_free_inodes(sb));
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
ext4_count_dirs(sb));
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
}
if (err) {
printk(KERN_ERR "EXT4-fs: insufficient memory\n");
goto failed_mount3;
}
sbi->s_stripe = ext4_get_stripe_size(sbi);
sb->s_op = &ext4_sops;
sb->s_export_op = &ext4_export_ops;
sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_QUOTA
sb->s_qcop = &ext4_qctl_operations;
sb->dq_op = &ext4_quota_operations;
#endif
INIT_LIST_HEAD(&sbi->s_orphan);
sb->s_root = NULL;
needs_recovery = (es->s_last_orphan != 0 ||
EXT4_HAS_INCOMPAT_FEATURE(sb,
EXT4_FEATURE_INCOMPAT_RECOVER));
if (!test_opt(sb, NOLOAD) &&
EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
if (ext4_load_journal(sb, es, journal_devnum))
goto failed_mount3;
if (!(sb->s_flags & MS_RDONLY) &&
EXT4_SB(sb)->s_journal->j_failed_commit) {
printk(KERN_CRIT "EXT4-fs error (device %s): "
"ext4_fill_super: Journal transaction "
"%u is corrupt\n", sb->s_id,
EXT4_SB(sb)->s_journal->j_failed_commit);
if (test_opt(sb, ERRORS_RO)) {
printk(KERN_CRIT
"Mounting filesystem read-only\n");
sb->s_flags |= MS_RDONLY;
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
}
if (test_opt(sb, ERRORS_PANIC)) {
EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
ext4_commit_super(sb, es, 1);
goto failed_mount4;
}
}
} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
printk(KERN_ERR "EXT4-fs: required journal recovery "
"suppressed and not mounted read-only\n");
goto failed_mount4;
} else {
clear_opt(sbi->s_mount_opt, DATA_FLAGS);
set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
sbi->s_journal = NULL;
needs_recovery = 0;
goto no_journal;
}
if (ext4_blocks_count(es) > 0xffffffffULL &&
!jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_64BIT)) {
printk(KERN_ERR "ext4: Failed to set 64-bit journal feature\n");
goto failed_mount4;
}
if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
jbd2_journal_set_features(sbi->s_journal,
JBD2_FEATURE_COMPAT_CHECKSUM, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
jbd2_journal_set_features(sbi->s_journal,
JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0);
jbd2_journal_clear_features(sbi->s_journal, 0, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
} else {
jbd2_journal_clear_features(sbi->s_journal,
JBD2_FEATURE_COMPAT_CHECKSUM, 0,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
}
switch (test_opt(sb, DATA_FLAGS)) {
case 0:
if (jbd2_journal_check_available_features
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
set_opt(sbi->s_mount_opt, ORDERED_DATA);
else
set_opt(sbi->s_mount_opt, JOURNAL_DATA);
break;
case EXT4_MOUNT_ORDERED_DATA:
case EXT4_MOUNT_WRITEBACK_DATA:
if (!jbd2_journal_check_available_features
(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
printk(KERN_ERR "EXT4-fs: Journal does not support "
"requested data journaling mode\n");
goto failed_mount4;
}
default:
break;
}
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
no_journal:
if (test_opt(sb, NOBH)) {
if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
printk(KERN_WARNING "EXT4-fs: Ignoring nobh option - "
"its supported only with writeback mode\n");
clear_opt(sbi->s_mount_opt, NOBH);
}
}
root = ext4_iget(sb, EXT4_ROOT_INO);
if (IS_ERR(root)) {
printk(KERN_ERR "EXT4-fs: get root inode failed\n");
ret = PTR_ERR(root);
goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
printk(KERN_ERR "EXT4-fs: corrupt root inode, run e2fsck\n");
goto failed_mount4;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
printk(KERN_ERR "EXT4-fs: get root dentry failed\n");
iput(root);
ret = -ENOMEM;
goto failed_mount4;
}
ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_want_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_want_extra_isize);
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_min_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_min_extra_isize);
}
}
if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
sbi->s_inode_size) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
printk(KERN_INFO "EXT4-fs: required extra inode space not"
"available.\n");
}
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
printk(KERN_WARNING "EXT4-fs: Ignoring delalloc option - "
"requested data journaling mode\n");
clear_opt(sbi->s_mount_opt, DELALLOC);
} else if (test_opt(sb, DELALLOC))
printk(KERN_INFO "EXT4-fs: delayed allocation enabled\n");
ext4_ext_init(sb);
err = ext4_mb_init(sb, needs_recovery);
if (err) {
printk(KERN_ERR "EXT4-fs: failed to initalize mballoc (%d)\n",
err);
goto failed_mount4;
}
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
ext4_orphan_cleanup(sb, es);
EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
if (needs_recovery) {
printk(KERN_INFO "EXT4-fs: recovery complete.\n");
ext4_mark_recovery_complete(sb, es);
}
if (EXT4_SB(sb)->s_journal) {
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
descr = " journalled data mode";
else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
descr = " ordered data mode";
else
descr = " writeback data mode";
} else
descr = "out journal";
printk(KERN_INFO "EXT4-fs: mounted filesystem %s with%s\n",
sb->s_id, descr);
lock_kernel();
return 0;
cantfind_ext4:
if (!silent)
printk(KERN_ERR "VFS: Can't find ext4 filesystem on dev %s.\n",
sb->s_id);
goto failed_mount;
failed_mount4:
printk(KERN_ERR "EXT4-fs (device %s): mount failed\n", sb->s_id);
if (sbi->s_journal) {
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
}
failed_mount3:
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
kfree(sbi->s_group_desc);
failed_mount:
if (sbi->s_proc) {
remove_proc_entry("inode_readahead_blks", sbi->s_proc);
remove_proc_entry(sb->s_id, ext4_proc_root);
}
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
#endif
ext4_blkdev_remove(sbi);
brelse(bh);
out_fail:
sb->s_fs_info = NULL;
kfree(sbi);
lock_kernel();
return ret;
} | 1,265 |
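The ext4_fill_super entry above leans on one idiom throughout: acquire resources in order, jump to progressively earlier failed_mount* labels on error, and release in reverse. A minimal, self-contained sketch of that unwind pattern in plain userspace C follows; the struct and field names are illustrative stand-ins, not kernel APIs.

/*
 * Sketch of the staged goto-unwind idiom: each later failure jumps to a
 * label that frees everything acquired so far, in reverse order. All
 * names here are illustrative; this is plain userspace C, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct ctx {
    char *desc;     /* stands in for s_group_desc */
    char *counters; /* stands in for the percpu counters */
};

static int ctx_setup(struct ctx *c)
{
    c->desc = malloc(64);
    if (c->desc == NULL)
        goto failed;
    c->counters = malloc(64);
    if (c->counters == NULL)
        goto failed_desc;
    return 0;

failed_desc:
    free(c->desc);
    c->desc = NULL;
failed:
    fprintf(stderr, "setup failed\n");
    return -1;
}

int main(void)
{
    struct ctx c = { 0 };

    if (ctx_setup(&c) == 0) {
        puts("mounted (sketch)");
        free(c.counters);
        free(c.desc);
    }
    return 0;
}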
1 | static int encode_mode(CinepakEncContext *s, CinepakMode mode, int h,
                       int v1_size, int v4_size, int v4,
                       AVPicture *scratch_pict, strip_info *info,
                       unsigned char *buf)
{
    int x, y, z, flags, bits, temp_size, header_ofs, ret = 0,
        mb_count = s->w * h / MB_AREA;
    int needs_extra_bit, should_write_temp;
    unsigned char temp[64]; //32/2 = 16 V4 blocks at 4 B each -> 64 B
    mb_info *mb;
    AVPicture sub_scratch;

    //encode codebooks
    if(v1_size)
        ret += encode_codebook(s, info->v1_codebook, v1_size, 0x22, 0x26, buf + ret);
    if(v4_size)
        ret += encode_codebook(s, info->v4_codebook, v4_size, 0x20, 0x24, buf + ret);

    //update scratch picture
    for(z = y = 0; y < h; y += MB_SIZE) {
        for(x = 0; x < s->w; x += MB_SIZE, z++) {
            mb = &s->mb[z];

            if(mode == MODE_MC && mb->best_encoding == ENC_SKIP)
                continue;

            get_sub_picture(s, x, y, scratch_pict, &sub_scratch);

            if(mode == MODE_V1_ONLY || mb->best_encoding == ENC_V1)
                decode_v1_vector(s, &sub_scratch, mb, info);
            else if(mode != MODE_V1_ONLY && mb->best_encoding == ENC_V4)
                decode_v4_vector(s, &sub_scratch, mb->v4_vector[v4], info);
        }
    }

    switch(mode) {
    case MODE_V1_ONLY:
        //av_log(s->avctx, AV_LOG_INFO, "mb_count = %i\n", mb_count);
        ret += write_chunk_header(buf + ret, 0x32, mb_count);

        for(x = 0; x < mb_count; x++)
            buf[ret++] = s->mb[x].v1_vector;

        break;
    case MODE_V1_V4:
        //remember header position
        header_ofs = ret;
        ret += CHUNK_HEADER_SIZE;

        for(x = 0; x < mb_count; x += 32) {
            flags = 0;
            for(y = x; y < FFMIN(x+32, mb_count); y++)
                if(s->mb[y].best_encoding == ENC_V4)
                    flags |= 1 << (31 - y + x);

            AV_WB32(&buf[ret], flags);
            ret += 4;

            for(y = x; y < FFMIN(x+32, mb_count); y++) {
                mb = &s->mb[y];

                if(mb->best_encoding == ENC_V1)
                    buf[ret++] = mb->v1_vector;
                else
                    for(z = 0; z < 4; z++)
                        buf[ret++] = mb->v4_vector[v4][z];
            }
        }

        write_chunk_header(buf + header_ofs, 0x30, ret - header_ofs - CHUNK_HEADER_SIZE);

        break;
    case MODE_MC:
        //remember header position
        header_ofs = ret;
        ret += CHUNK_HEADER_SIZE;

        flags = bits = temp_size = 0;
        for(x = 0; x < mb_count; x++) {
            mb = &s->mb[x];
            flags |= (mb->best_encoding != ENC_SKIP) << (31 - bits++);
            needs_extra_bit = 0;
            should_write_temp = 0;

            if(mb->best_encoding != ENC_SKIP) {
                if(bits < 32)
                    flags |= (mb->best_encoding == ENC_V4) << (31 - bits++);
                else
                    needs_extra_bit = 1;
            }

            if(bits == 32) {
                AV_WB32(&buf[ret], flags);
                ret += 4;
                flags = bits = 0;

                if(mb->best_encoding == ENC_SKIP || needs_extra_bit) {
                    memcpy(&buf[ret], temp, temp_size);
                    ret += temp_size;
                    temp_size = 0;
                } else
                    should_write_temp = 1;
            }

            if(needs_extra_bit) {
                flags = (mb->best_encoding == ENC_V4) << 31;
                bits = 1;
            }

            if(mb->best_encoding == ENC_V1)
                temp[temp_size++] = mb->v1_vector;
            else if(mb->best_encoding == ENC_V4)
                for(z = 0; z < 4; z++)
                    temp[temp_size++] = mb->v4_vector[v4][z];

            if(should_write_temp) {
                memcpy(&buf[ret], temp, temp_size);
                ret += temp_size;
                temp_size = 0;
            }
        }

        if(bits > 0) {
            AV_WB32(&buf[ret], flags);
            ret += 4;
            memcpy(&buf[ret], temp, temp_size);
            ret += temp_size;
        }

        write_chunk_header(buf + header_ofs, 0x31, ret - header_ofs - CHUNK_HEADER_SIZE);

        break;
    }

    return ret;
} | static int encode_mode(CinepakEncContext *s, CinepakMode mode, int h,
                       int v1_size, int v4_size, int v4,
                       AVPicture *scratch_pict, strip_info *info,
                       unsigned char *buf)
{
    int x, y, z, flags, bits, temp_size, header_ofs, ret = 0,
        mb_count = s->w * h / MB_AREA;
    int needs_extra_bit, should_write_temp;
    unsigned char temp[64]; | 1,266
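The MODE_V1_V4 and MODE_MC branches above pack one decision bit per macroblock into 32-bit words, most significant bit first, and store each word big-endian. A small stand-alone sketch of that packing follows; wb32() is a hand-rolled stand-in for FFmpeg's AV_WB32 macro, and the per-block decisions are toy data.

/*
 * MSB-first flag packing plus an explicit big-endian 32-bit store,
 * mirroring the flags |= 1 << (31 - ...) / AV_WB32 pattern above.
 */
#include <stdio.h>
#include <stdint.h>

static void wb32(uint8_t *p, uint32_t v)
{
    p[0] = (uint8_t)(v >> 24);  /* big-endian byte order */
    p[1] = (uint8_t)(v >> 16);
    p[2] = (uint8_t)(v >> 8);
    p[3] = (uint8_t)v;
}

int main(void)
{
    int coded[5] = { 1, 0, 1, 1, 0 };   /* toy per-block decisions */
    uint32_t flags = 0;
    uint8_t out[4];
    int i;

    for (i = 0; i < 5; i++)
        if (coded[i])
            flags |= 1u << (31 - i);    /* fill from the MSB down */
    wb32(out, flags);
    printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
    return 0;
}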
1 | inline void update_rq_clock(struct rq *rq)
{
if (!rq->skip_clock_update) {
int cpu = cpu_of(rq);
u64 irq_time;
rq->clock = sched_clock_cpu(cpu);
irq_time = irq_time_cpu(cpu);
if (rq->clock - irq_time > rq->clock_task)
rq->clock_task = rq->clock - irq_time;
sched_irq_time_avg_update(rq, irq_time);
}
} | inline void update_rq_clock(struct rq *rq)
{
if (!rq->skip_clock_update) {
int cpu = cpu_of(rq);
u64 irq_time;
rq->clock = sched_clock_cpu(cpu);
irq_time = irq_time_cpu(cpu);
if (rq->clock - irq_time > rq->clock_task)
rq->clock_task = rq->clock - irq_time;
sched_irq_time_avg_update(rq, irq_time);
}
} | 1,268 |
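A toy model of the accounting rule in update_rq_clock above: the task clock advances only when wall-clock time minus accumulated IRQ time has moved past it, so interrupt time is excluded from task runtime. Plain C with made-up tick values, not the kernel implementation.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t clock = 0, clock_task = 0, irq_time = 0;

    clock = 1000; irq_time = 0;
    if (clock - irq_time > clock_task)
        clock_task = clock - irq_time;   /* 1000: all of it was task time */

    clock = 2000; irq_time = 600;        /* 600 of the next 1000 went to IRQs */
    if (clock - irq_time > clock_task)
        clock_task = clock - irq_time;   /* 1400, not 2000 */

    printf("clock=%llu clock_task=%llu\n",
           (unsigned long long)clock, (unsigned long long)clock_task);
    return 0;
}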
0 | static char **create_argv_command(struct rule *rule, struct process *process,
                                  struct iovec **argv)
{
    size_t count, i, j, stdin_arg;
    char **req_argv = NULL;
    const char *program;

    for (count = 0; argv[count] != NULL; count++)
        ;
    if (rule->sudo_user == NULL)
        req_argv = xcalloc(count + 1, sizeof(char *));
    else
        req_argv = xcalloc(count + 5, sizeof(char *));
    if (rule->sudo_user != NULL) {
        req_argv[0] = xstrdup(PATH_SUDO);
        req_argv[1] = xstrdup("-u");
        req_argv[2] = xstrdup(rule->sudo_user);
        req_argv[3] = xstrdup("--");
        req_argv[4] = xstrdup(rule->program);
        j = 5;
    } else {
        program = strrchr(rule->program, '/');
        if (program == NULL)
            program = rule->program;
        else
            program++;
        req_argv[0] = xstrdup(program);
        j = 1;
    }
    if (rule->stdin_arg == -1)
        stdin_arg = count - 1;
    else
        stdin_arg = (size_t) rule->stdin_arg;
    for (i = 1; i < count; i++) {
        const char *data = argv[i]->iov_base;
        size_t length = argv[i]->iov_len;

        if (i == stdin_arg) {
            process->input = evbuffer_new();
            if (process->input == NULL)
                die("internal error: cannot create input buffer");
            if (evbuffer_add(process->input, data, length) < 0)
                die("internal error: cannot add data to input buffer");
            continue;
        }
        if (length == 0)
            req_argv[j] = xstrdup("");
        else
            req_argv[j] = xstrndup(data, length);
        j++;
    }
    req_argv[j] = NULL;
    return req_argv;
} | static char **create_argv_command(struct rule *rule, struct process *process,
                                  struct iovec **argv)
{
    size_t count, i, j, stdin_arg;
    char **req_argv = NULL;
    const char *program;

    for (count = 0; argv[count] != NULL; count++)
        ;
    if (rule->sudo_user == NULL)
        req_argv = xcalloc(count + 1, sizeof(char *));
    else
        req_argv = xcalloc(count + 5, sizeof(char *));
    if (rule->sudo_user != NULL) {
        req_argv[0] = xstrdup(PATH_SUDO);
        req_argv[1] = xstrdup("-u");
        req_argv[2] = xstrdup(rule->sudo_user);
        req_argv[3] = xstrdup("--");
        req_argv[4] = xstrdup(rule->program);
        j = 5;
    } else {
        program = strrchr(rule->program, '/');
        if (program == NULL)
            program = rule->program;
        else
            program++;
        req_argv[0] = xstrdup(program);
        j = 1;
    }
    if (rule->stdin_arg == -1)
        stdin_arg = count - 1;
    else
        stdin_arg = (size_t) rule->stdin_arg;
    for (i = 1; i < count; i++) {
        const char *data = argv[i]->iov_base;
        size_t length = argv[i]->iov_len;

        if (i == stdin_arg) {
            process->input = evbuffer_new();
            if (process->input == NULL)
                die("internal error: cannot create input buffer");
            if (evbuffer_add(process->input, data, length) < 0)
                die("internal error: cannot add data to input buffer");
            continue;
        }
        if (length == 0)
            req_argv[j] = xstrdup("");
        else
            req_argv[j] = xstrndup(data, length);
        j++;
    }
    req_argv[j] = NULL;
    return req_argv;
} | 1,269
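For reference, the same NULL-terminated argv construction in stand-alone form: count the inputs, allocate count plus room for the terminator, duplicate each argument, and terminate. Plain libc calloc/strdup replace the xcalloc/xstrdup wrappers above, and error handling is deliberately minimal.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char **build_argv(const char *program, const char *args[])
{
    size_t count, i;
    char **req_argv;

    for (count = 0; args[count] != NULL; count++)
        ;
    req_argv = calloc(count + 2, sizeof(char *));
    if (req_argv == NULL)
        return NULL;
    req_argv[0] = strdup(program);      /* argv[0] is the program name */
    for (i = 0; i < count; i++)
        req_argv[i + 1] = strdup(args[i]);
    req_argv[count + 1] = NULL;         /* execv() needs the terminator */
    return req_argv;
}

int main(void)
{
    const char *args[] = { "-l", "/tmp", NULL };
    char **argv = build_argv("ls", args);
    size_t i;

    if (argv == NULL)
        return 1;
    for (i = 0; argv[i] != NULL; i++)
        printf("argv[%zu] = %s\n", i, argv[i]);
    for (i = 0; argv[i] != NULL; i++)
        free(argv[i]);
    free(argv);
    return 0;
}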
1 | static int smacker_read_header(AVFormatContext *s, AVFormatParameters *ap) { ByteIOContext *pb = &s->pb; SmackerContext *smk = (SmackerContext *)s->priv_data; AVStream *st, *ast[7]; int i, ret; int tbase; /* read and check header */ smk->magic = get_le32(pb); if (smk->magic != MKTAG('S', 'M', 'K', '2') && smk->magic != MKTAG('S', 'M', 'K', '4')) smk->width = get_le32(pb); smk->height = get_le32(pb); smk->frames = get_le32(pb); smk->pts_inc = (int32_t)get_le32(pb); smk->flags = get_le32(pb); for(i = 0; i < 7; i++) smk->audio[i] = get_le32(pb); smk->treesize = get_le32(pb); smk->mmap_size = get_le32(pb); smk->mclr_size = get_le32(pb); smk->full_size = get_le32(pb); smk->type_size = get_le32(pb); for(i = 0; i < 7; i++) smk->rates[i] = get_le32(pb); smk->pad = get_le32(pb); /* setup data */ if(smk->frames > 0xFFFFFF) { av_log(s, AV_LOG_ERROR, "Too many frames: %i\n", smk->frames); smk->frm_size = av_malloc(smk->frames * 4); smk->frm_flags = av_malloc(smk->frames); smk->is_ver4 = (smk->magic != MKTAG('S', 'M', 'K', '2')); /* read frame info */ for(i = 0; i < smk->frames; i++) { smk->frm_size[i] = get_le32(pb); for(i = 0; i < smk->frames; i++) { smk->frm_flags[i] = get_byte(pb); /* init video codec */ st = av_new_stream(s, 0); if (!st) smk->videoindex = st->index; st->codec->width = smk->width; st->codec->height = smk->height; st->codec->pix_fmt = PIX_FMT_PAL8; st->codec->codec_type = CODEC_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_SMACKVIDEO; st->codec->codec_tag = smk->is_ver4; /* Smacker uses 100000 as internal timebase */ if(smk->pts_inc < 0) smk->pts_inc = -smk->pts_inc; else smk->pts_inc *= 100; tbase = 100000; av_reduce(&tbase, &smk->pts_inc, tbase, smk->pts_inc, (1UL<<31)-1); av_set_pts_info(st, 33, smk->pts_inc, tbase); /* handle possible audio streams */ for(i = 0; i < 7; i++) { smk->indexes[i] = -1; if((smk->rates[i] & 0xFFFFFF) && !(smk->rates[i] & SMK_AUD_BINKAUD)){ ast[i] = av_new_stream(s, 0); smk->indexes[i] = ast[i]->index; av_set_pts_info(ast[i], 33, smk->pts_inc, tbase); ast[i]->codec->codec_type = CODEC_TYPE_AUDIO; ast[i]->codec->codec_id = (smk->rates[i] & SMK_AUD_PACKED) ? CODEC_ID_SMACKAUDIO : CODEC_ID_PCM_U8; ast[i]->codec->codec_tag = 0; ast[i]->codec->channels = (smk->rates[i] & SMK_AUD_STEREO) ? 2 : 1; ast[i]->codec->sample_rate = smk->rates[i] & 0xFFFFFF; ast[i]->codec->bits_per_sample = (smk->rates[i] & SMK_AUD_16BITS) ? 
16 : 8; if(ast[i]->codec->bits_per_sample == 16 && ast[i]->codec->codec_id == CODEC_ID_PCM_U8) ast[i]->codec->codec_id = CODEC_ID_PCM_S16LE; /* load trees to extradata, they will be unpacked by decoder */ st->codec->extradata = av_malloc(smk->treesize + 16); st->codec->extradata_size = smk->treesize + 16; if(!st->codec->extradata){ av_log(s, AV_LOG_ERROR, "Cannot allocate %i bytes of extradata\n", smk->treesize + 16); av_free(smk->frm_size); av_free(smk->frm_flags); ret = get_buffer(pb, st->codec->extradata + 16, st->codec->extradata_size - 16); if(ret != st->codec->extradata_size - 16){ av_free(smk->frm_size); av_free(smk->frm_flags); return AVERROR_IO; ((int32_t*)st->codec->extradata)[0] = le2me_32(smk->mmap_size); ((int32_t*)st->codec->extradata)[1] = le2me_32(smk->mclr_size); ((int32_t*)st->codec->extradata)[2] = le2me_32(smk->full_size); ((int32_t*)st->codec->extradata)[3] = le2me_32(smk->type_size); smk->curstream = -1; smk->nextpos = url_ftell(pb); return 0; | static int smacker_read_header(AVFormatContext *s, AVFormatParameters *ap) { ByteIOContext *pb = &s->pb; SmackerContext *smk = (SmackerContext *)s->priv_data; AVStream *st, *ast[7]; int i, ret; int tbase; smk->magic = get_le32(pb); if (smk->magic != MKTAG('S', 'M', 'K', '2') && smk->magic != MKTAG('S', 'M', 'K', '4')) smk->width = get_le32(pb); smk->height = get_le32(pb); smk->frames = get_le32(pb); smk->pts_inc = (int32_t)get_le32(pb); smk->flags = get_le32(pb); for(i = 0; i < 7; i++) smk->audio[i] = get_le32(pb); smk->treesize = get_le32(pb); smk->mmap_size = get_le32(pb); smk->mclr_size = get_le32(pb); smk->full_size = get_le32(pb); smk->type_size = get_le32(pb); for(i = 0; i < 7; i++) smk->rates[i] = get_le32(pb); smk->pad = get_le32(pb); if(smk->frames > 0xFFFFFF) { av_log(s, AV_LOG_ERROR, "Too many frames: %i\n", smk->frames); smk->frm_size = av_malloc(smk->frames * 4); smk->frm_flags = av_malloc(smk->frames); smk->is_ver4 = (smk->magic != MKTAG('S', 'M', 'K', '2')); for(i = 0; i < smk->frames; i++) { smk->frm_size[i] = get_le32(pb); for(i = 0; i < smk->frames; i++) { smk->frm_flags[i] = get_byte(pb); st = av_new_stream(s, 0); if (!st) smk->videoindex = st->index; st->codec->width = smk->width; st->codec->height = smk->height; st->codec->pix_fmt = PIX_FMT_PAL8; st->codec->codec_type = CODEC_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_SMACKVIDEO; st->codec->codec_tag = smk->is_ver4; if(smk->pts_inc < 0) smk->pts_inc = -smk->pts_inc; else smk->pts_inc *= 100; tbase = 100000; av_reduce(&tbase, &smk->pts_inc, tbase, smk->pts_inc, (1UL<<31)-1); av_set_pts_info(st, 33, smk->pts_inc, tbase); for(i = 0; i < 7; i++) { smk->indexes[i] = -1; if((smk->rates[i] & 0xFFFFFF) && !(smk->rates[i] & SMK_AUD_BINKAUD)){ ast[i] = av_new_stream(s, 0); smk->indexes[i] = ast[i]->index; av_set_pts_info(ast[i], 33, smk->pts_inc, tbase); ast[i]->codec->codec_type = CODEC_TYPE_AUDIO; ast[i]->codec->codec_id = (smk->rates[i] & SMK_AUD_PACKED) ? CODEC_ID_SMACKAUDIO : CODEC_ID_PCM_U8; ast[i]->codec->codec_tag = 0; ast[i]->codec->channels = (smk->rates[i] & SMK_AUD_STEREO) ? 2 : 1; ast[i]->codec->sample_rate = smk->rates[i] & 0xFFFFFF; ast[i]->codec->bits_per_sample = (smk->rates[i] & SMK_AUD_16BITS) ? 
16 : 8; if(ast[i]->codec->bits_per_sample == 16 && ast[i]->codec->codec_id == CODEC_ID_PCM_U8) ast[i]->codec->codec_id = CODEC_ID_PCM_S16LE; st->codec->extradata = av_malloc(smk->treesize + 16); st->codec->extradata_size = smk->treesize + 16; if(!st->codec->extradata){ av_log(s, AV_LOG_ERROR, "Cannot allocate %i bytes of extradata\n", smk->treesize + 16); av_free(smk->frm_size); av_free(smk->frm_flags); ret = get_buffer(pb, st->codec->extradata + 16, st->codec->extradata_size - 16); if(ret != st->codec->extradata_size - 16){ av_free(smk->frm_size); av_free(smk->frm_flags); return AVERROR_IO; ((int32_t*)st->codec->extradata)[0] = le2me_32(smk->mmap_size); ((int32_t*)st->codec->extradata)[1] = le2me_32(smk->mclr_size); ((int32_t*)st->codec->extradata)[2] = le2me_32(smk->full_size); ((int32_t*)st->codec->extradata)[3] = le2me_32(smk->type_size); smk->curstream = -1; smk->nextpos = url_ftell(pb); return 0; | 1,270 |
1 | static void pci_edu_realize(PCIDevice *pdev, Error **errp)
{
    EduState *edu = DO_UPCAST(EduState, pdev, pdev);
    uint8_t *pci_conf = pdev->config;

    timer_init_ms(&edu->dma_timer, QEMU_CLOCK_VIRTUAL, edu_dma_timer, edu);

    qemu_mutex_init(&edu->thr_mutex);
    qemu_cond_init(&edu->thr_cond);
    qemu_thread_create(&edu->thread, "edu", edu_fact_thread,
                       edu, QEMU_THREAD_JOINABLE);

    pci_config_set_interrupt_pin(pci_conf, 1);

    if (msi_init(pdev, 0, 1, true, false, errp)) {
        return;
    }

    memory_region_init_io(&edu->mmio, OBJECT(edu), &edu_mmio_ops, edu,
                          "edu-mmio", 1 << 20);
    pci_register_bar(pdev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &edu->mmio);
} | static void pci_edu_realize(PCIDevice *pdev, Error **errp)
{
    EduState *edu = DO_UPCAST(EduState, pdev, pdev);
    uint8_t *pci_conf = pdev->config;

    timer_init_ms(&edu->dma_timer, QEMU_CLOCK_VIRTUAL, edu_dma_timer, edu);

    qemu_mutex_init(&edu->thr_mutex);
    qemu_cond_init(&edu->thr_cond);
    qemu_thread_create(&edu->thread, "edu", edu_fact_thread,
                       edu, QEMU_THREAD_JOINABLE);

    pci_config_set_interrupt_pin(pci_conf, 1);

    if (msi_init(pdev, 0, 1, true, false, errp)) {
        return;
    }

    memory_region_init_io(&edu->mmio, OBJECT(edu), &edu_mmio_ops, edu,
                          "edu-mmio", 1 << 20);
    pci_register_bar(pdev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &edu->mmio);
} | 1,271
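A userspace sketch of the worker-thread bring-up order in pci_edu_realize above: initialize the mutex and condition variable before creating the joinable thread that will block on them. POSIX pthreads stand in for QEMU's qemu_mutex/qemu_cond/qemu_thread wrappers; the work flag is toy data.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t thr_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t thr_cond = PTHREAD_COND_INITIALIZER;
static int work_ready;

static void *worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&thr_mutex);
    while (!work_ready)                     /* wait for the main thread */
        pthread_cond_wait(&thr_cond, &thr_mutex);
    pthread_mutex_unlock(&thr_mutex);
    puts("worker: got work");
    return NULL;
}

int main(void)
{
    pthread_t thread;

    pthread_create(&thread, NULL, worker, NULL); /* joinable by default */
    pthread_mutex_lock(&thr_mutex);
    work_ready = 1;
    pthread_cond_signal(&thr_cond);
    pthread_mutex_unlock(&thr_mutex);
    pthread_join(thread, NULL);
    return 0;
}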
1 | static int originates_from_local_legacy_unicast_socket(AvahiServer *s, const AvahiAddress *address, uint16_t port) {
assert(s);
assert(address);
assert(port > 0);
if (!s->config.enable_reflector)
return 0;
if (!avahi_address_is_local(s->monitor, address))
return 0;
if (address->proto == AVAHI_PROTO_INET && s->fd_legacy_unicast_ipv4 >= 0) {
struct sockaddr_in lsa;
socklen_t l = sizeof(lsa);
if (getsockname(s->fd_legacy_unicast_ipv4, (struct sockaddr*) &lsa, &l) != 0)
avahi_log_warn("getsockname(): %s", strerror(errno));
else
return lsa.sin_port == port;
}
if (address->proto == AVAHI_PROTO_INET6 && s->fd_legacy_unicast_ipv6 >= 0) {
struct sockaddr_in6 lsa;
socklen_t l = sizeof(lsa);
if (getsockname(s->fd_legacy_unicast_ipv6, (struct sockaddr*) &lsa, &l) != 0)
avahi_log_warn("getsockname(): %s", strerror(errno));
else
return lsa.sin6_port == port;
}
return 0;
} | static int originates_from_local_legacy_unicast_socket(AvahiServer *s, const AvahiAddress *address, uint16_t port) {
assert(s);
assert(address);
assert(port > 0);
if (!s->config.enable_reflector)
return 0;
if (!avahi_address_is_local(s->monitor, address))
return 0;
if (address->proto == AVAHI_PROTO_INET && s->fd_legacy_unicast_ipv4 >= 0) {
struct sockaddr_in lsa;
socklen_t l = sizeof(lsa);
if (getsockname(s->fd_legacy_unicast_ipv4, (struct sockaddr*) &lsa, &l) != 0)
avahi_log_warn("getsockname(): %s", strerror(errno));
else
return lsa.sin_port == port;
}
if (address->proto == AVAHI_PROTO_INET6 && s->fd_legacy_unicast_ipv6 >= 0) {
struct sockaddr_in6 lsa;
socklen_t l = sizeof(lsa);
if (getsockname(s->fd_legacy_unicast_ipv6, (struct sockaddr*) &lsa, &l) != 0)
avahi_log_warn("getsockname(): %s", strerror(errno));
else
return lsa.sin6_port == port;
}
return 0;
} | 1,272 |
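The reflector check above recognizes its own socket by reading back the bound port with getsockname(). A minimal stand-alone demonstration of that call: bind a UDP socket to an ephemeral port and print what the kernel assigned. POSIX sockets only; error handling trimmed.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    struct sockaddr_in sa, lsa;
    socklen_t l = sizeof(lsa);

    memset(&sa, 0, sizeof(sa));
    sa.sin_family = AF_INET;
    sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    sa.sin_port = 0;                     /* let the kernel pick a port */
    if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) != 0)
        return 1;

    if (getsockname(fd, (struct sockaddr *)&lsa, &l) == 0)
        printf("bound to port %u\n", (unsigned)ntohs(lsa.sin_port));
    close(fd);
    return 0;
}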
0 | int mem_mapped_map_color_rgb(gx_device *dev, gx_color_index color,
                             gx_color_value prgb[3])
{
    gx_device_memory * const mdev = (gx_device_memory *)dev;
    const byte *pptr = mdev->palette.data + (int)color * 3;

    prgb[0] = gx_color_value_from_byte(pptr[0]);
    prgb[1] = gx_color_value_from_byte(pptr[1]);
    prgb[2] = gx_color_value_from_byte(pptr[2]);
    return 0;
} | int mem_mapped_map_color_rgb(gx_device *dev, gx_color_index color,
                             gx_color_value prgb[3])
{
    gx_device_memory * const mdev = (gx_device_memory *)dev;
    const byte *pptr = mdev->palette.data + (int)color * 3;

    prgb[0] = gx_color_value_from_byte(pptr[0]);
    prgb[1] = gx_color_value_from_byte(pptr[1]);
    prgb[2] = gx_color_value_from_byte(pptr[2]);
    return 0;
} | 1,273
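A plain-C sketch of the palette lookup above. It assumes gx_color_value_from_byte() widens an 8-bit channel to a 16-bit color value by replicating the byte, so 0x00 maps to 0x0000 and 0xFF to 0xFFFF; the helper name and widths here are illustrative assumptions, not Ghostscript's actual definitions.

#include <stdio.h>
#include <stdint.h>

static uint16_t color_value_from_byte(uint8_t b)
{
    return (uint16_t)((b << 8) | b);   /* full-scale widening */
}

int main(void)
{
    /* a 2-entry RGB palette: red, then mid gray */
    static const uint8_t palette[] = { 255, 0, 0, 128, 128, 128 };
    unsigned index = 1;                /* color index into the palette */
    const uint8_t *pptr = palette + index * 3;
    uint16_t rgb[3];
    int i;

    for (i = 0; i < 3; i++)
        rgb[i] = color_value_from_byte(pptr[i]);
    printf("%04x %04x %04x\n", rgb[0], rgb[1], rgb[2]);
    return 0;
}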
1 | static ssize_t inotify_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
size_t event_size = sizeof (struct inotify_event);
struct inotify_device *dev;
char __user *start;
int ret;
DEFINE_WAIT(wait);
start = buf;
dev = file->private_data;
while (1) {
prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
mutex_lock(&dev->ev_mutex);
if (!list_empty(&dev->events)) {
ret = 0;
break;
}
mutex_unlock(&dev->ev_mutex);
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
ret = -EINTR;
break;
}
schedule();
}
finish_wait(&dev->wq, &wait);
if (ret)
return ret;
while (1) {
struct inotify_kernel_event *kevent;
ret = buf - start;
if (list_empty(&dev->events))
break;
kevent = inotify_dev_get_event(dev);
if (event_size + kevent->event.len > count) {
if (ret == 0 && count > 0) {
/*
* could not get a single event because we
* didn't have enough buffer space.
*/
ret = -EINVAL;
}
break;
}
remove_kevent(dev, kevent);
/*
* Must perform the copy_to_user outside the mutex in order
* to avoid a lock order reversal with mmap_sem.
*/
mutex_unlock(&dev->ev_mutex);
if (copy_to_user(buf, &kevent->event, event_size)) {
ret = -EFAULT;
break;
}
buf += event_size;
count -= event_size;
if (kevent->name) {
if (copy_to_user(buf, kevent->name, kevent->event.len)){
ret = -EFAULT;
break;
}
buf += kevent->event.len;
count -= kevent->event.len;
}
free_kevent(kevent);
mutex_lock(&dev->ev_mutex);
}
mutex_unlock(&dev->ev_mutex);
return ret;
} | static ssize_t inotify_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
size_t event_size = sizeof (struct inotify_event);
struct inotify_device *dev;
char __user *start;
int ret;
DEFINE_WAIT(wait);
start = buf;
dev = file->private_data;
while (1) {
prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
mutex_lock(&dev->ev_mutex);
if (!list_empty(&dev->events)) {
ret = 0;
break;
}
mutex_unlock(&dev->ev_mutex);
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
ret = -EINTR;
break;
}
schedule();
}
finish_wait(&dev->wq, &wait);
if (ret)
return ret;
while (1) {
struct inotify_kernel_event *kevent;
ret = buf - start;
if (list_empty(&dev->events))
break;
kevent = inotify_dev_get_event(dev);
if (event_size + kevent->event.len > count) {
if (ret == 0 && count > 0) {
ret = -EINVAL;
}
break;
}
remove_kevent(dev, kevent);
mutex_unlock(&dev->ev_mutex);
if (copy_to_user(buf, &kevent->event, event_size)) {
ret = -EFAULT;
break;
}
buf += event_size;
count -= event_size;
if (kevent->name) {
if (copy_to_user(buf, kevent->name, kevent->event.len)){
ret = -EFAULT;
break;
}
buf += kevent->event.len;
count -= kevent->event.len;
}
free_kevent(kevent);
mutex_lock(&dev->ev_mutex);
}
mutex_unlock(&dev->ev_mutex);
return ret;
} | 1,275 |
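The function above is the kernel side of the inotify queue; a matching userspace consumer reads batches of variable-length records, each a fixed header followed by event.len name bytes. A minimal Linux-only reader using the public sys/inotify.h API, with error handling trimmed:

#include <stdio.h>
#include <string.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    int fd = inotify_init();
    ssize_t n;
    char *p;

    if (fd < 0)
        return 1;
    inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
    n = read(fd, buf, sizeof(buf));   /* blocks; may return several events */
    for (p = buf; p < buf + n; ) {
        struct inotify_event ev;

        memcpy(&ev, p, sizeof(ev));   /* fixed header, then ev.len name bytes */
        printf("wd=%d mask=0x%x", ev.wd, (unsigned)ev.mask);
        if (ev.len)
            printf(" name=%s", p + sizeof(ev));
        printf("\n");
        p += sizeof(ev) + ev.len;
    }
    close(fd);
    return 0;
}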
1 | static void _UTF16LEToUnicodeWithOffsets(UConverterToUnicodeArgs *pArgs, UErrorCode *pErrorCode) {
    UConverter *cnv;
    const uint8_t *source;
    UChar *target;
    int32_t *offsets;
    uint32_t targetCapacity, length, count, sourceIndex;
    UChar c, trail;

    if (pArgs->converter->mode < 8) {
        _UTF16ToUnicodeWithOffsets(pArgs, pErrorCode);
        return;
    }

    cnv = pArgs->converter;
    source = (const uint8_t *)pArgs->source;
    length = (int32_t)((const uint8_t *)pArgs->sourceLimit - source);
    if (length <= 0 && cnv->toUnicodeStatus == 0) {
        return;
    }

    target = pArgs->target;
    if (target >= pArgs->targetLimit) {
        *pErrorCode = U_BUFFER_OVERFLOW_ERROR;
        return;
    }
    targetCapacity = (uint32_t)(pArgs->targetLimit - pArgs->target);
    offsets = pArgs->offsets;
    sourceIndex = 0;
    c = 0;

    if (cnv->toUnicodeStatus != 0) {
        cnv->toUBytes[0] = (uint8_t)cnv->toUnicodeStatus;
        cnv->toULength = 1;
        cnv->toUnicodeStatus = 0;
    }
    if ((count = cnv->toULength) != 0) {
        uint8_t *p = cnv->toUBytes;
        do {
            p[count++] = *source++;
            ++sourceIndex;
            --length;
            if (count == 2) {
                c = ((UChar)p[1] << 8) | p[0];
                if (U16_IS_SINGLE(c)) {
                    *target++ = c;
                    if (offsets != NULL) {
                        *offsets++ = -1;
                    }
                    --targetCapacity;
                    count = 0;
                    c = 0;
                    break;
                } else if (U16_IS_SURROGATE_LEAD(c)) {
                    c = 0;
                } else {
                    break;
                }
            } else if (count == 4) {
                c = ((UChar)p[1] << 8) | p[0];
                trail = ((UChar)p[3] << 8) | p[2];
                if (U16_IS_TRAIL(trail)) {
                    *target++ = c;
                    if (targetCapacity >= 2) {
                        *target++ = trail;
                        if (offsets != NULL) {
                            *offsets++ = -1;
                            *offsets++ = -1;
                        }
                        targetCapacity -= 2;
                    } else {
                        targetCapacity = 0;
                        cnv->UCharErrorBuffer[0] = trail;
                        cnv->UCharErrorBufferLength = 1;
                        *pErrorCode = U_BUFFER_OVERFLOW_ERROR;
                    }
                    count = 0;
                    c = 0;
                    break;
                } else {
                    *pErrorCode = U_ILLEGAL_CHAR_FOUND;
                    if (((const uint8_t *)pArgs->source - source) >= 2) {
                        source -= 2;
                    } else {
                        cnv->toUnicodeStatus = 0x100 | p[2];
                        --source;
                    }
                    cnv->toULength = 2;
                    pArgs->source = (const char *)source;
                    pArgs->target = target;
                    pArgs->offsets = offsets;
                    return;
                }
            }
        } while (length > 0);
        cnv->toULength = (int8_t)count;
    }

    count = 2 * targetCapacity;
    if (count > length) {
        count = length & ~1;
    }
    if (c == 0 && count > 0) {
        length -= count;
        count >>= 1;
        targetCapacity -= count;
        if (offsets == NULL) {
            do {
                c = ((UChar)source[1] << 8) | source[0];
                source += 2;
                if (U16_IS_SINGLE(c)) {
                    *target++ = c;
                } else if (U16_IS_SURROGATE_LEAD(c) && count >= 2 &&
                           U16_IS_TRAIL(trail = ((UChar)source[1] << 8) | source[0])) {
                    source += 2;
                    --count;
                    *target++ = c;
                    *target++ = trail;
                } else {
                    break;
                }
            } while (--count > 0);
        } else {
            do {
                c = ((UChar)source[1] << 8) | source[0];
                source += 2;
                if (U16_IS_SINGLE(c)) {
                    *target++ = c;
                    *offsets++ = sourceIndex;
                    sourceIndex += 2;
                } else if (U16_IS_SURROGATE_LEAD(c) && count >= 2 &&
                           U16_IS_TRAIL(trail = ((UChar)source[1] << 8) | source[0])) {
                    source += 2;
                    --count;
                    *target++ = c;
                    *target++ = trail;
                    *offsets++ = sourceIndex;
                    *offsets++ = sourceIndex;
                    sourceIndex += 4;
                } else {
                    break;
                }
            } while (--count > 0);
        }
        if (count == 0) {
            c = 0;
        } else {
            length += 2 * (count - 1);
            targetCapacity += count;
        }
    }

    if (c != 0) {
        cnv->toUBytes[0] = (uint8_t)c;
        cnv->toUBytes[1] = (uint8_t)(c >> 8);
        cnv->toULength = 2;
        if (U16_IS_SURROGATE_LEAD(c)) {
            if (length >= 2) {
                if (U16_IS_TRAIL(trail = ((UChar)source[1] << 8) | source[0])) {
                    source += 2;
                    length -= 2;
                    *target++ = c;
                    if (offsets != NULL) {
                        *offsets++ = sourceIndex;
                    }
                    cnv->UCharErrorBuffer[0] = trail;
                    cnv->UCharErrorBufferLength = 1;
                    cnv->toULength = 0;
                    *pErrorCode = U_BUFFER_OVERFLOW_ERROR;
                } else {
                    *pErrorCode = U_ILLEGAL_CHAR_FOUND;
                }
            } else {
            }
        } else {
            *pErrorCode = U_ILLEGAL_CHAR_FOUND;
        }
    }

    if (U_SUCCESS(*pErrorCode)) {
        if (length > 0) {
            if (targetCapacity == 0) {
                *pErrorCode = U_BUFFER_OVERFLOW_ERROR;
            } else {
                cnv->toUBytes[cnv->toULength++] = *source++;
            }
        }
    }

    pArgs->source = (const char *)source;
    pArgs->target = target;
    pArgs->offsets = offsets;
} | static void _UTF16LEToUnicodeWithOffsets(UConverterToUnicodeArgs *pArgs, UErrorCode *pErrorCode) {
    UConverter *cnv;
    const uint8_t *source;
    UChar *target;
    int32_t *offsets;
    uint32_t targetCapacity, length, count, sourceIndex;
    UChar c, trail;

    if (pArgs->converter->mode < 8) {
        _UTF16ToUnicodeWithOffsets(pArgs, pErrorCode);
        return;
    }

    cnv = pArgs->converter;
    source = (const uint8_t *)pArgs->source;
    length = (int32_t)((const uint8_t *)pArgs->sourceLimit - source);
    if (length <= 0 && cnv->toUnicodeStatus == 0) {
        return;
    }

    target = pArgs->target;
    if (target >= pArgs->targetLimit) {
        *pErrorCode = U_BUFFER_OVERFLOW_ERROR;
        return;
    }
    targetCapacity = (uint32_t)(pArgs->targetLimit - pArgs->target);
    offsets = pArgs->offsets;
    sourceIndex = 0;
    c = 0;

    if (cnv->toUnicodeStatus != 0) {
        cnv->toUBytes[0] = (uint8_t)cnv->toUnicodeStatus;
        cnv->toULength = 1;
        cnv->toUnicodeStatus = 0;
    }
    if ((count = cnv->toULength) != 0) {
        uint8_t *p = cnv->toUBytes;
        do {
            p[count++] = *source++;
            ++sourceIndex;
            --length;
            if (count == 2) {
                c = ((UChar)p[1] << 8) | p[0];
                if (U16_IS_SINGLE(c)) {
                    *target++ = c;
                    if (offsets != NULL) {
                        *offsets++ = -1;
                    }
                    --targetCapacity;
                    count = 0;
                    c = 0;
                    break;
                } else if (U16_IS_SURROGATE_LEAD(c)) {
                    c = 0;
                } else {
                    break;
                }
            } else if (count == 4) {
                c = ((UChar)p[1] << 8) | p[0];
                trail = ((UChar)p[3] << 8) | p[2];
                if (U16_IS_TRAIL(trail)) {
                    *target++ = c;
                    if (targetCapacity >= 2) {
                        *target++ = trail;
                        if (offsets != NULL) {
                            *offsets++ = -1;
                            *offsets++ = -1;
                        }
                        targetCapacity -= 2;
                    } else {
                        targetCapacity = 0;
                        cnv->UCharErrorBuffer[0] = trail;
                        cnv->UCharErrorBufferLength = 1;
                        *pErrorCode = U_BUFFER_OVERFLOW_ERROR;
                    }
                    count = 0;
                    c = 0;
                    break;
                } else {
                    *pErrorCode = U_ILLEGAL_CHAR_FOUND;
                    if (((const uint8_t *)pArgs->source - source) >= 2) {
                        source -= 2;
                    } else {
                        cnv->toUnicodeStatus = 0x100 | p[2];
                        --source;
                    }
                    cnv->toULength = 2;
                    pArgs->source = (const char *)source;
                    pArgs->target = target;
                    pArgs->offsets = offsets;
                    return;
                }
            }
        } while (length > 0);
        cnv->toULength = (int8_t)count;
    }

    count = 2 * targetCapacity;
    if (count > length) {
        count = length & ~1;
    }
    if (c == 0 && count > 0) {
        length -= count;
        count >>= 1;
        targetCapacity -= count;
        if (offsets == NULL) {
            do {
                c = ((UChar)source[1] << 8) | source[0];
                source += 2;
                if (U16_IS_SINGLE(c)) {
                    *target++ = c;
                } else if (U16_IS_SURROGATE_LEAD(c) && count >= 2 &&
                           U16_IS_TRAIL(trail = ((UChar)source[1] << 8) | source[0])) {
                    source += 2;
                    --count;
                    *target++ = c;
                    *target++ = trail;
                } else {
                    break;
                }
            } while (--count > 0);
        } else {
            do {
                c = ((UChar)source[1] << 8) | source[0];
                source += 2;
                if (U16_IS_SINGLE(c)) {
                    *target++ = c;
                    *offsets++ = sourceIndex;
                    sourceIndex += 2;
                } else if (U16_IS_SURROGATE_LEAD(c) && count >= 2 &&
                           U16_IS_TRAIL(trail = ((UChar)source[1] << 8) | source[0])) {
                    source += 2;
                    --count;
                    *target++ = c;
                    *target++ = trail;
                    *offsets++ = sourceIndex;
                    *offsets++ = sourceIndex;
                    sourceIndex += 4;
                } else {
                    break;
                }
            } while (--count > 0);
        }
        if (count == 0) {
            c = 0;
        } else {
            length += 2 * (count - 1);
            targetCapacity += count;
        }
    }

    if (c != 0) {
        cnv->toUBytes[0] = (uint8_t)c;
        cnv->toUBytes[1] = (uint8_t)(c >> 8);
        cnv->toULength = 2;
        if (U16_IS_SURROGATE_LEAD(c)) {
            if (length >= 2) {
                if (U16_IS_TRAIL(trail = ((UChar)source[1] << 8) | source[0])) {
                    source += 2;
                    length -= 2;
                    *target++ = c;
                    if (offsets != NULL) {
                        *offsets++ = sourceIndex;
                    }
                    cnv->UCharErrorBuffer[0] = trail;
                    cnv->UCharErrorBufferLength = 1;
                    cnv->toULength = 0;
                    *pErrorCode = U_BUFFER_OVERFLOW_ERROR;
                } else {
                    *pErrorCode = U_ILLEGAL_CHAR_FOUND;
                }
            } else {
            }
        } else {
            *pErrorCode = U_ILLEGAL_CHAR_FOUND;
        }
    }

    if (U_SUCCESS(*pErrorCode)) {
        if (length > 0) {
            if (targetCapacity == 0) {
                *pErrorCode = U_BUFFER_OVERFLOW_ERROR;
            } else {
                cnv->toUBytes[cnv->toULength++] = *source++;
            }
        }
    }

    pArgs->source = (const char *)source;
    pArgs->target = target;
    pArgs->offsets = offsets;
} | 1,277
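A compact sketch of the code-unit assembly the converter above performs: read UTF-16LE byte pairs low byte first, pass BMP units through, and fold a lead/trail surrogate pair into one code point. The IS_* macros mirror ICU's U16_IS_* checks; the input bytes are a toy sample.

#include <stdio.h>
#include <stdint.h>

#define IS_SINGLE(c)  ((c) < 0xD800 || (c) > 0xDFFF)
#define IS_LEAD(c)    ((c) >= 0xD800 && (c) <= 0xDBFF)
#define IS_TRAIL(c)   ((c) >= 0xDC00 && (c) <= 0xDFFF)

int main(void)
{
    /* "A" then U+1F600 in UTF-16LE: 0x0041, 0xD83D 0xDE00 */
    const uint8_t src[] = { 0x41, 0x00, 0x3D, 0xD8, 0x00, 0xDE };
    size_t i = 0, n = sizeof(src);

    while (i + 1 < n) {
        uint16_t c = (uint16_t)((src[i + 1] << 8) | src[i]);  /* low byte first */
        i += 2;
        if (IS_SINGLE(c)) {
            printf("U+%04X\n", c);
        } else if (IS_LEAD(c) && i + 1 < n) {
            uint16_t trail = (uint16_t)((src[i + 1] << 8) | src[i]);
            if (IS_TRAIL(trail)) {
                uint32_t cp;
                i += 2;
                cp = 0x10000 + (((uint32_t)(c - 0xD800) << 10) | (trail - 0xDC00));
                printf("U+%04lX\n", (unsigned long)cp);
            } else {
                printf("unpaired lead U+%04X\n", c);
            }
        } else {
            printf("unpaired surrogate U+%04X\n", c);
        }
    }
    return 0;
}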
1 | static int wma_decode_block(WMACodecContext *s)
{
int n, v, a, ch, bsize;
int coef_nb_bits, total_gain;
int nb_coefs[MAX_CHANNELS];
float mdct_norm;
FFTContext *mdct;
#ifdef TRACE
tprintf(s->avctx, "***decode_block: %d:%d\n", s->frame_count - 1, s->block_num);
#endif
/* compute current block length */
if (s->use_variable_block_len) {
n = av_log2(s->nb_block_sizes - 1) + 1;
if (s->reset_block_lengths) {
s->reset_block_lengths = 0;
v = get_bits(&s->gb, n);
if (v >= s->nb_block_sizes){
av_log(s->avctx, AV_LOG_ERROR, "prev_block_len_bits %d out of range\n", s->frame_len_bits - v);
return -1;
}
s->prev_block_len_bits = s->frame_len_bits - v;
v = get_bits(&s->gb, n);
if (v >= s->nb_block_sizes){
av_log(s->avctx, AV_LOG_ERROR, "block_len_bits %d out of range\n", s->frame_len_bits - v);
return -1;
}
s->block_len_bits = s->frame_len_bits - v;
} else {
/* update block lengths */
s->prev_block_len_bits = s->block_len_bits;
s->block_len_bits = s->next_block_len_bits;
}
v = get_bits(&s->gb, n);
if (v >= s->nb_block_sizes){
av_log(s->avctx, AV_LOG_ERROR, "next_block_len_bits %d out of range\n", s->frame_len_bits - v);
return -1;
}
s->next_block_len_bits = s->frame_len_bits - v;
} else {
/* fixed block len */
s->next_block_len_bits = s->frame_len_bits;
s->prev_block_len_bits = s->frame_len_bits;
s->block_len_bits = s->frame_len_bits;
}
if (s->frame_len_bits - s->block_len_bits >= s->nb_block_sizes){
av_log(s->avctx, AV_LOG_ERROR, "block_len_bits not initialized to a valid value\n");
return -1;
}
/* now check if the block length is coherent with the frame length */
s->block_len = 1 << s->block_len_bits;
if ((s->block_pos + s->block_len) > s->frame_len){
av_log(s->avctx, AV_LOG_ERROR, "frame_len overflow\n");
return -1;
}
if (s->avctx->channels == 2) {
s->ms_stereo = get_bits1(&s->gb);
}
v = 0;
for(ch = 0; ch < s->avctx->channels; ch++) {
a = get_bits1(&s->gb);
s->channel_coded[ch] = a;
v |= a;
}
bsize = s->frame_len_bits - s->block_len_bits;
/* if no channel coded, no need to go further */
/* XXX: fix potential framing problems */
if (!v)
goto next;
/* read total gain and extract corresponding number of bits for coef escape coding */
total_gain = 1;
for(;;) {
a = get_bits(&s->gb, 7);
total_gain += a;
if (a != 127)
break;
}
coef_nb_bits = ff_wma_total_gain_to_bits(total_gain);
/* compute number of coefficients */
n = s->coefs_end[bsize] - s->coefs_start;
for(ch = 0; ch < s->avctx->channels; ch++)
nb_coefs[ch] = n;
/* complex coding */
if (s->use_noise_coding) {
for(ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
int i, n, a;
n = s->exponent_high_sizes[bsize];
for(i=0;i<n;i++) {
a = get_bits1(&s->gb);
s->high_band_coded[ch][i] = a;
/* if noise coding, the coefficients are not transmitted */
if (a)
nb_coefs[ch] -= s->exponent_high_bands[bsize][i];
}
}
}
for(ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
int i, n, val, code;
n = s->exponent_high_sizes[bsize];
val = (int)0x80000000;
for(i=0;i<n;i++) {
if (s->high_band_coded[ch][i]) {
if (val == (int)0x80000000) {
val = get_bits(&s->gb, 7) - 19;
} else {
code = get_vlc2(&s->gb, s->hgain_vlc.table, HGAINVLCBITS, HGAINMAX);
if (code < 0){
av_log(s->avctx, AV_LOG_ERROR, "hgain vlc invalid\n");
return -1;
}
val += code - 18;
}
s->high_band_values[ch][i] = val;
}
}
}
}
}
/* exponents can be reused in short blocks. */
if ((s->block_len_bits == s->frame_len_bits) || get_bits1(&s->gb)) {
for(ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
if (s->use_exp_vlc) {
if (decode_exp_vlc(s, ch) < 0)
return -1;
} else {
decode_exp_lsp(s, ch);
}
s->exponents_bsize[ch] = bsize;
}
}
}
/* parse spectral coefficients : just RLE encoding */
for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
int tindex;
WMACoef* ptr = &s->coefs1[ch][0];
/* special VLC tables are used for ms stereo because there is potentially less energy there */
tindex = (ch == 1 && s->ms_stereo);
memset(ptr, 0, s->block_len * sizeof(WMACoef));
ff_wma_run_level_decode(s->avctx, &s->gb, &s->coef_vlc[tindex], s->level_table[tindex], s->run_table[tindex], 0, ptr, 0, nb_coefs[ch], s->block_len, s->frame_len_bits, coef_nb_bits);
}
if (s->version == 1 && s->avctx->channels >= 2) {
align_get_bits(&s->gb);
}
}
/* normalize */
{
int n4 = s->block_len / 2;
mdct_norm = 1.0 / (float)n4;
if (s->version == 1) {
mdct_norm *= sqrt(n4);
}
}
/* finally compute the MDCT coefficients */
for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
WMACoef *coefs1;
float *coefs, *exponents, mult, mult1, noise;
int i, j, n, n1, last_high_band, esize;
float exp_power[HIGH_BAND_MAX_SIZE];
coefs1 = s->coefs1[ch];
exponents = s->exponents[ch];
esize = s->exponents_bsize[ch];
mult = pow(10, total_gain * 0.05) / s->max_exponent[ch];
mult *= mdct_norm;
coefs = s->coefs[ch];
if (s->use_noise_coding) {
mult1 = mult;
/* very low freqs : noise */
for(i = 0;i < s->coefs_start; i++) {
*coefs++ = s->noise_table[s->noise_index] * exponents[i<<bsize>>esize] * mult1;
s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1);
}
n1 = s->exponent_high_sizes[bsize];
/* compute power of high bands */
exponents = s->exponents[ch] + (s->high_band_start[bsize]<<bsize>>esize);
last_high_band = 0; /* avoid warning */
for(j=0;j<n1;j++) {
n = s->exponent_high_bands[s->frame_len_bits - s->block_len_bits][j];
if (s->high_band_coded[ch][j]) {
float e2, v;
e2 = 0;
for(i = 0;i < n; i++) {
v = exponents[i<<bsize>>esize];
e2 += v * v;
}
exp_power[j] = e2 / n;
last_high_band = j;
tprintf(s->avctx, "%d: power=%f (%d)\n", j, exp_power[j], n);
}
exponents += n<<bsize>>esize;
}
/* main freqs and high freqs */
exponents = s->exponents[ch] + (s->coefs_start<<bsize>>esize);
for(j=-1;j<n1;j++) {
if (j < 0) {
n = s->high_band_start[bsize] - s->coefs_start;
} else {
n = s->exponent_high_bands[s->frame_len_bits - s->block_len_bits][j];
}
if (j >= 0 && s->high_band_coded[ch][j]) {
/* use noise with specified power */
mult1 = sqrt(exp_power[j] / exp_power[last_high_band]);
/* XXX: use a table */
mult1 = mult1 * pow(10, s->high_band_values[ch][j] * 0.05);
mult1 = mult1 / (s->max_exponent[ch] * s->noise_mult);
mult1 *= mdct_norm;
for(i = 0;i < n; i++) {
noise = s->noise_table[s->noise_index];
s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1);
*coefs++ = noise * exponents[i<<bsize>>esize] * mult1;
}
exponents += n<<bsize>>esize;
} else {
/* coded values + small noise */
for(i = 0;i < n; i++) {
noise = s->noise_table[s->noise_index];
s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1);
*coefs++ = ((*coefs1++) + noise) * exponents[i<<bsize>>esize] * mult;
}
exponents += n<<bsize>>esize;
}
}
/* very high freqs : noise */
n = s->block_len - s->coefs_end[bsize];
mult1 = mult * exponents[((-1<<bsize))>>esize];
for(i = 0; i < n; i++) {
*coefs++ = s->noise_table[s->noise_index] * mult1;
s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1);
}
} else {
/* XXX: optimize more */
for(i = 0;i < s->coefs_start; i++)
*coefs++ = 0.0;
n = nb_coefs[ch];
for(i = 0;i < n; i++) {
*coefs++ = coefs1[i] * exponents[i<<bsize>>esize] * mult;
}
n = s->block_len - s->coefs_end[bsize];
for(i = 0;i < n; i++)
*coefs++ = 0.0;
}
}
}
#ifdef TRACE
for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
dump_floats(s, "exponents", 3, s->exponents[ch], s->block_len);
dump_floats(s, "coefs", 1, s->coefs[ch], s->block_len);
}
}
#endif
if (s->ms_stereo && s->channel_coded[1]) {
/* nominal case for ms stereo: we do it before mdct */
/* no need to optimize this case because it should almost never happen */
if (!s->channel_coded[0]) {
tprintf(s->avctx, "rare ms-stereo case happened\n");
memset(s->coefs[0], 0, sizeof(float) * s->block_len);
s->channel_coded[0] = 1;
}
s->fdsp.butterflies_float(s->coefs[0], s->coefs[1], s->block_len);
}
next:
mdct = &s->mdct_ctx[bsize];
for (ch = 0; ch < s->avctx->channels; ch++) {
int n4, index;
n4 = s->block_len / 2;
if(s->channel_coded[ch]){
mdct->imdct_calc(mdct, s->output, s->coefs[ch]);
}else if(!(s->ms_stereo && ch==1))
memset(s->output, 0, sizeof(s->output));
/* multiply by the window and add in the frame */
index = (s->frame_len / 2) + s->block_pos - n4;
wma_window(s, &s->frame_out[ch][index]);
}
/* update block number */
s->block_num++;
s->block_pos += s->block_len;
if (s->block_pos >= s->frame_len)
return 1;
else
return 0;
} | static int wma_decode_block(WMACodecContext *s)
{
int n, v, a, ch, bsize;
int coef_nb_bits, total_gain;
int nb_coefs[MAX_CHANNELS];
float mdct_norm;
FFTContext *mdct;
#ifdef TRACE
tprintf(s->avctx, "***decode_block: %d:%d\n", s->frame_count - 1, s->block_num);
#endif
if (s->use_variable_block_len) {
n = av_log2(s->nb_block_sizes - 1) + 1;
if (s->reset_block_lengths) {
s->reset_block_lengths = 0;
v = get_bits(&s->gb, n);
if (v >= s->nb_block_sizes){
av_log(s->avctx, AV_LOG_ERROR, "prev_block_len_bits %d out of range\n", s->frame_len_bits - v);
return -1;
}
s->prev_block_len_bits = s->frame_len_bits - v;
v = get_bits(&s->gb, n);
if (v >= s->nb_block_sizes){
av_log(s->avctx, AV_LOG_ERROR, "block_len_bits %d out of range\n", s->frame_len_bits - v);
return -1;
}
s->block_len_bits = s->frame_len_bits - v;
} else {
s->prev_block_len_bits = s->block_len_bits;
s->block_len_bits = s->next_block_len_bits;
}
v = get_bits(&s->gb, n);
if (v >= s->nb_block_sizes){
av_log(s->avctx, AV_LOG_ERROR, "next_block_len_bits %d out of range\n", s->frame_len_bits - v);
return -1;
}
s->next_block_len_bits = s->frame_len_bits - v;
} else {
s->next_block_len_bits = s->frame_len_bits;
s->prev_block_len_bits = s->frame_len_bits;
s->block_len_bits = s->frame_len_bits;
}
if (s->frame_len_bits - s->block_len_bits >= s->nb_block_sizes){
av_log(s->avctx, AV_LOG_ERROR, "block_len_bits not initialized to a valid value\n");
return -1;
}
s->block_len = 1 << s->block_len_bits;
if ((s->block_pos + s->block_len) > s->frame_len){
av_log(s->avctx, AV_LOG_ERROR, "frame_len overflow\n");
return -1;
}
if (s->avctx->channels == 2) {
s->ms_stereo = get_bits1(&s->gb);
}
v = 0;
for(ch = 0; ch < s->avctx->channels; ch++) {
a = get_bits1(&s->gb);
s->channel_coded[ch] = a;
v |= a;
}
bsize = s->frame_len_bits - s->block_len_bits;
if (!v)
goto next;
total_gain = 1;
for(;;) {
a = get_bits(&s->gb, 7);
total_gain += a;
if (a != 127)
break;
}
coef_nb_bits = ff_wma_total_gain_to_bits(total_gain);
n = s->coefs_end[bsize] - s->coefs_start;
for(ch = 0; ch < s->avctx->channels; ch++)
nb_coefs[ch] = n;
if (s->use_noise_coding) {
for(ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
int i, n, a;
n = s->exponent_high_sizes[bsize];
for(i=0;i<n;i++) {
a = get_bits1(&s->gb);
s->high_band_coded[ch][i] = a;
if (a)
nb_coefs[ch] -= s->exponent_high_bands[bsize][i];
}
}
}
for(ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
int i, n, val, code;
n = s->exponent_high_sizes[bsize];
val = (int)0x80000000;
for(i=0;i<n;i++) {
if (s->high_band_coded[ch][i]) {
if (val == (int)0x80000000) {
val = get_bits(&s->gb, 7) - 19;
} else {
code = get_vlc2(&s->gb, s->hgain_vlc.table, HGAINVLCBITS, HGAINMAX);
if (code < 0){
av_log(s->avctx, AV_LOG_ERROR, "hgain vlc invalid\n");
return -1;
}
val += code - 18;
}
s->high_band_values[ch][i] = val;
}
}
}
}
}
if ((s->block_len_bits == s->frame_len_bits) || get_bits1(&s->gb)) {
for(ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
if (s->use_exp_vlc) {
if (decode_exp_vlc(s, ch) < 0)
return -1;
} else {
decode_exp_lsp(s, ch);
}
s->exponents_bsize[ch] = bsize;
}
}
}
for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
int tindex;
WMACoef* ptr = &s->coefs1[ch][0];
tindex = (ch == 1 && s->ms_stereo);
memset(ptr, 0, s->block_len * sizeof(WMACoef));
ff_wma_run_level_decode(s->avctx, &s->gb, &s->coef_vlc[tindex], s->level_table[tindex], s->run_table[tindex], 0, ptr, 0, nb_coefs[ch], s->block_len, s->frame_len_bits, coef_nb_bits);
}
if (s->version == 1 && s->avctx->channels >= 2) {
align_get_bits(&s->gb);
}
}
{
int n4 = s->block_len / 2;
mdct_norm = 1.0 / (float)n4;
if (s->version == 1) {
mdct_norm *= sqrt(n4);
}
}
for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
WMACoef *coefs1;
float *coefs, *exponents, mult, mult1, noise;
int i, j, n, n1, last_high_band, esize;
float exp_power[HIGH_BAND_MAX_SIZE];
coefs1 = s->coefs1[ch];
exponents = s->exponents[ch];
esize = s->exponents_bsize[ch];
mult = pow(10, total_gain * 0.05) / s->max_exponent[ch];
mult *= mdct_norm;
coefs = s->coefs[ch];
if (s->use_noise_coding) {
mult1 = mult;
for(i = 0;i < s->coefs_start; i++) {
*coefs++ = s->noise_table[s->noise_index] * exponents[i<<bsize>>esize] * mult1;
s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1);
}
n1 = s->exponent_high_sizes[bsize];
exponents = s->exponents[ch] + (s->high_band_start[bsize]<<bsize>>esize);
last_high_band = 0;
for(j=0;j<n1;j++) {
n = s->exponent_high_bands[s->frame_len_bits - s->block_len_bits][j];
if (s->high_band_coded[ch][j]) {
float e2, v;
e2 = 0;
for(i = 0;i < n; i++) {
v = exponents[i<<bsize>>esize];
e2 += v * v;
}
exp_power[j] = e2 / n;
last_high_band = j;
tprintf(s->avctx, "%d: power=%f (%d)\n", j, exp_power[j], n);
}
exponents += n<<bsize>>esize;
}
exponents = s->exponents[ch] + (s->coefs_start<<bsize>>esize);
for(j=-1;j<n1;j++) {
if (j < 0) {
n = s->high_band_start[bsize] - s->coefs_start;
} else {
n = s->exponent_high_bands[s->frame_len_bits - s->block_len_bits][j];
}
if (j >= 0 && s->high_band_coded[ch][j]) {
mult1 = sqrt(exp_power[j] / exp_power[last_high_band]);
mult1 = mult1 * pow(10, s->high_band_values[ch][j] * 0.05);
mult1 = mult1 / (s->max_exponent[ch] * s->noise_mult);
mult1 *= mdct_norm;
for(i = 0;i < n; i++) {
noise = s->noise_table[s->noise_index];
s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1);
*coefs++ = noise * exponents[i<<bsize>>esize] * mult1;
}
exponents += n<<bsize>>esize;
} else {
for(i = 0;i < n; i++) {
noise = s->noise_table[s->noise_index];
s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1);
*coefs++ = ((*coefs1++) + noise) * exponents[i<<bsize>>esize] * mult;
}
exponents += n<<bsize>>esize;
}
}
n = s->block_len - s->coefs_end[bsize];
mult1 = mult * exponents[((-1<<bsize))>>esize];
for(i = 0; i < n; i++) {
*coefs++ = s->noise_table[s->noise_index] * mult1;
s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1);
}
} else {
for(i = 0;i < s->coefs_start; i++)
*coefs++ = 0.0;
n = nb_coefs[ch];
for(i = 0;i < n; i++) {
*coefs++ = coefs1[i] * exponents[i<<bsize>>esize] * mult;
}
n = s->block_len - s->coefs_end[bsize];
for(i = 0;i < n; i++)
*coefs++ = 0.0;
}
}
}
#ifdef TRACE
for (ch = 0; ch < s->avctx->channels; ch++) {
if (s->channel_coded[ch]) {
dump_floats(s, "exponents", 3, s->exponents[ch], s->block_len);
dump_floats(s, "coefs", 1, s->coefs[ch], s->block_len);
}
}
#endif
if (s->ms_stereo && s->channel_coded[1]) {
if (!s->channel_coded[0]) {
tprintf(s->avctx, "rare ms-stereo case happened\n");
memset(s->coefs[0], 0, sizeof(float) * s->block_len);
s->channel_coded[0] = 1;
}
s->fdsp.butterflies_float(s->coefs[0], s->coefs[1], s->block_len);
}
next:
mdct = &s->mdct_ctx[bsize];
for (ch = 0; ch < s->avctx->channels; ch++) {
int n4, index;
n4 = s->block_len / 2;
if(s->channel_coded[ch]){
mdct->imdct_calc(mdct, s->output, s->coefs[ch]);
}else if(!(s->ms_stereo && ch==1))
memset(s->output, 0, sizeof(s->output));
index = (s->frame_len / 2) + s->block_pos - n4;
wma_window(s, &s->frame_out[ch][index]);
}
s->block_num++;
s->block_pos += s->block_len;
if (s->block_pos >= s->frame_len)
return 1;
else
return 0;
} | 1,278
1 | static int orinoco_ioctl_set_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
hermes_t *hw = &priv->hw;
struct iw_param *param = &wrqu->param;
unsigned long flags;
int ret = -EINPROGRESS;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
case IW_AUTH_PRIVACY_INVOKED:
case IW_AUTH_DROP_UNENCRYPTED:
/*
* orinoco does not use these parameters
*/
break;
case IW_AUTH_KEY_MGMT:
/* wl_lkm implies value 2 == PSK for Hermes I,
* which ties in with WEXT;
* no other hints, though.
*/
priv->key_mgmt = param->value;
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
/* When countermeasures are enabled, shut down the
* card; when disabled, re-enable the card. This must
* take effect immediately.
*
* TODO: Make sure that the EAPOL message is getting
* out before the card is disabled
*/
if (param->value) {
priv->tkip_cm_active = 1;
ret = hermes_enable_port(hw, 0);
} else {
priv->tkip_cm_active = 0;
ret = hermes_disable_port(hw, 0);
}
break;
case IW_AUTH_80211_AUTH_ALG:
if (param->value & IW_AUTH_ALG_SHARED_KEY)
priv->wep_restrict = 1;
else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM)
priv->wep_restrict = 0;
else
ret = -EINVAL;
break;
case IW_AUTH_WPA_ENABLED:
if (priv->has_wpa) {
priv->wpa_enabled = param->value ? 1 : 0;
} else {
if (param->value)
ret = -EOPNOTSUPP;
/* else silently accept disable of WPA */
priv->wpa_enabled = 0;
}
break;
default:
ret = -EOPNOTSUPP;
}
orinoco_unlock(priv, &flags);
return ret;
} | static int orinoco_ioctl_set_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
hermes_t *hw = &priv->hw;
struct iw_param *param = &wrqu->param;
unsigned long flags;
int ret = -EINPROGRESS;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
case IW_AUTH_PRIVACY_INVOKED:
case IW_AUTH_DROP_UNENCRYPTED:
break;
case IW_AUTH_KEY_MGMT:
priv->key_mgmt = param->value;
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
if (param->value) {
priv->tkip_cm_active = 1;
ret = hermes_enable_port(hw, 0);
} else {
priv->tkip_cm_active = 0;
ret = hermes_disable_port(hw, 0);
}
break;
case IW_AUTH_80211_AUTH_ALG:
if (param->value & IW_AUTH_ALG_SHARED_KEY)
priv->wep_restrict = 1;
else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM)
priv->wep_restrict = 0;
else
ret = -EINVAL;
break;
case IW_AUTH_WPA_ENABLED:
if (priv->has_wpa) {
priv->wpa_enabled = param->value ? 1 : 0;
} else {
if (param->value)
ret = -EOPNOTSUPP;
priv->wpa_enabled = 0;
}
break;
default:
ret = -EOPNOTSUPP;
}
orinoco_unlock(priv, &flags);
return ret;
} | 1,280 |
1 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
{
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
char *virt;
size_t size = 0;
int rc = 0;
if (likely(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
printk(KERN_ERR "Key is invalid; bailing out\n");
rc = -EINVAL;
goto out;
}
} else {
printk(KERN_WARNING "%s: Encrypted flag not set\n",
__func__);
rc = -EINVAL;
goto out;
}
/* Released in this function */
virt = (char *)get_zeroed_page(GFP_KERNEL);
if (!virt) {
printk(KERN_ERR "%s: Out of memory\n", __func__);
rc = -ENOMEM;
goto out;
}
rc = ecryptfs_write_headers_virt(virt, PAGE_CACHE_SIZE, &size,
crypt_stat, ecryptfs_dentry);
if (unlikely(rc)) {
printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n",
__func__, rc);
goto out_free;
}
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry,
crypt_stat, virt, size);
else
rc = ecryptfs_write_metadata_to_contents(crypt_stat,
ecryptfs_dentry, virt);
if (rc) {
printk(KERN_ERR "%s: Error writing metadata out to lower file; "
"rc = [%d]\n", __func__, rc);
goto out_free;
}
out_free:
free_page((unsigned long)virt);
out:
return rc;
} | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
{
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
char *virt;
size_t size = 0;
int rc = 0;
if (likely(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
printk(KERN_ERR "Key is invalid; bailing out\n");
rc = -EINVAL;
goto out;
}
} else {
printk(KERN_WARNING "%s: Encrypted flag not set\n",
__func__);
rc = -EINVAL;
goto out;
}
virt = (char *)get_zeroed_page(GFP_KERNEL);
if (!virt) {
printk(KERN_ERR "%s: Out of memory\n", __func__);
rc = -ENOMEM;
goto out;
}
rc = ecryptfs_write_headers_virt(virt, PAGE_CACHE_SIZE, &size,
crypt_stat, ecryptfs_dentry);
if (unlikely(rc)) {
printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n",
__func__, rc);
goto out_free;
}
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry,
crypt_stat, virt, size);
else
rc = ecryptfs_write_metadata_to_contents(crypt_stat,
ecryptfs_dentry, virt);
if (rc) {
printk(KERN_ERR "%s: Error writing metadata out to lower file; "
"rc = [%d]\n", __func__, rc);
goto out_free;
}
out_free:
free_page((unsigned long)virt);
out:
return rc;
} | 1,281 |