| func (string, 12-2.67k chars) | cwe (string, 7 classes) | __index_level_0__ (int64, 0-20k) |
|---|---|---|
int pop_reconnect(struct Mailbox *m)
{
struct PopAccountData *adata = pop_adata_get(m);
if (adata->status == POP_CONNECTED)
return 0;
while (true)
{
mutt_socket_close(adata->conn);
int ret = pop_open_connection(adata);
if (ret == 0)
{
struct Progress progress;
mutt_progress_init(&progress, _("Verifying message indexes..."), MUTT_PROGRESS_NET, 0);
for (int i = 0; i < m->msg_count; i++)
{
struct PopEmailData *edata = pop_edata_get(m->emails[i]);
edata->refno = -1;
}
ret = pop_fetch_data(adata, "UIDL\r\n", &progress, check_uidl, m);
if (ret == -2)
{
mutt_error("%s", adata->err_msg);
}
}
if (ret == 0)
return 0;
pop_logout(m);
if (ret < -1)
return -1;
if (query_quadoption(C_PopReconnect,
_("Connection lost. Reconnect to POP server?")) != MUTT_YES)
{
return -1;
}
}
}
| safe | 402 |
authuser(char *myname, char *login_style, int persist)
{
char *challenge = NULL, *response, rbuf[1024], cbuf[128];
auth_session_t *as;
int fd = -1;
if (persist)
fd = open("/dev/tty", O_RDWR);
if (fd != -1) {
if (ioctl(fd, TIOCCHKVERAUTH) == 0)
goto good;
}
if (!(as = auth_userchallenge(myname, login_style, "auth-doas",
&challenge)))
errx(1, "Authorization failed");
if (!challenge) {
char host[HOST_NAME_MAX + 1];
if (gethostname(host, sizeof(host)))
snprintf(host, sizeof(host), "?");
snprintf(cbuf, sizeof(cbuf),
"\rdoas (%.32s@%.32s) password: ", myname, host);
challenge = cbuf;
}
response = readpassphrase(challenge, rbuf, sizeof(rbuf),
RPP_REQUIRE_TTY);
if (response == NULL && errno == ENOTTY) {
syslog(LOG_AUTHPRIV | LOG_NOTICE,
"tty required for %s", myname);
errx(1, "a tty is required");
}
if (!auth_userresponse(as, response, 0)) {
explicit_bzero(rbuf, sizeof(rbuf));
syslog(LOG_AUTHPRIV | LOG_NOTICE,
"failed auth for %s", myname);
errx(1, "Authorization failed");
}
explicit_bzero(rbuf, sizeof(rbuf));
good:
if (fd != -1) {
int secs = 5 * 60;
ioctl(fd, TIOCSETVERAUTH, &secs);
close(fd);
}
}
| safe | 403 |
bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
struct sta_info *sta;
u8 ac = txq->ac;
spin_lock_bh(&local->active_txq_lock[ac]);
if (!txqi->txq.sta)
goto out;
if (list_empty(&txqi->schedule_order))
goto out;
list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
schedule_order) {
if (iter == txqi)
break;
if (!iter->txq.sta) {
list_move_tail(&iter->schedule_order,
&local->active_txqs[ac]);
continue;
}
sta = container_of(iter->txq.sta, struct sta_info, sta);
if (sta->airtime[ac].deficit < 0)
sta->airtime[ac].deficit += sta->airtime_weight;
list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
}
sta = container_of(txqi->txq.sta, struct sta_info, sta);
if (sta->airtime[ac].deficit >= 0)
goto out;
sta->airtime[ac].deficit += sta->airtime_weight;
list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
spin_unlock_bh(&local->active_txq_lock[ac]);
return false;
out:
if (!list_empty(&txqi->schedule_order))
list_del_init(&txqi->schedule_order);
spin_unlock_bh(&local->active_txq_lock[ac]);
return true;
}
| safe | 404 |
uint8_t werr_to_dns_err(WERROR werr)
{
if (W_ERROR_EQUAL(WERR_OK, werr)) {
return DNS_RCODE_OK;
} else if (W_ERROR_EQUAL(DNS_ERR(FORMAT_ERROR), werr)) {
return DNS_RCODE_FORMERR;
} else if (W_ERROR_EQUAL(DNS_ERR(SERVER_FAILURE), werr)) {
return DNS_RCODE_SERVFAIL;
} else if (W_ERROR_EQUAL(DNS_ERR(NAME_ERROR), werr)) {
return DNS_RCODE_NXDOMAIN;
} else if (W_ERROR_EQUAL(WERR_DNS_ERROR_NAME_DOES_NOT_EXIST, werr)) {
return DNS_RCODE_NXDOMAIN;
} else if (W_ERROR_EQUAL(DNS_ERR(NOT_IMPLEMENTED), werr)) {
return DNS_RCODE_NOTIMP;
} else if (W_ERROR_EQUAL(DNS_ERR(REFUSED), werr)) {
return DNS_RCODE_REFUSED;
} else if (W_ERROR_EQUAL(DNS_ERR(YXDOMAIN), werr)) {
return DNS_RCODE_YXDOMAIN;
} else if (W_ERROR_EQUAL(DNS_ERR(YXRRSET), werr)) {
return DNS_RCODE_YXRRSET;
} else if (W_ERROR_EQUAL(DNS_ERR(NXRRSET), werr)) {
return DNS_RCODE_NXRRSET;
} else if (W_ERROR_EQUAL(DNS_ERR(NOTAUTH), werr)) {
return DNS_RCODE_NOTAUTH;
} else if (W_ERROR_EQUAL(DNS_ERR(NOTZONE), werr)) {
return DNS_RCODE_NOTZONE;
} else if (W_ERROR_EQUAL(DNS_ERR(BADKEY), werr)) {
return DNS_RCODE_BADKEY;
}
DEBUG(5, ("No mapping exists for %s\n", win_errstr(werr)));
return DNS_RCODE_SERVFAIL;
}
| safe | 405 |
static int parse_hex_blob(const char *filename, hwaddr *addr, uint8_t *hex_blob,
size_t hex_blob_size, AddressSpace *as)
{
bool in_process = false; /* avoid re-enter and
* check whether record begin with ':' */
uint8_t *end = hex_blob + hex_blob_size;
uint8_t our_checksum = 0;
uint32_t record_index = 0;
HexParser parser = {
.filename = filename,
.bin_buf = g_malloc(hex_blob_size),
.start_addr = addr,
.as = as,
};
rom_transaction_begin();
for (; hex_blob < end; ++hex_blob) {
switch (*hex_blob) {
case '\r':
case '\n':
if (!in_process) {
break;
}
in_process = false;
if ((LEN_EXCEPT_DATA + parser.line.byte_count) * 2 !=
record_index ||
our_checksum != 0) {
parser.total_size = -1;
goto out;
}
if (handle_record_type(&parser) == -1) {
parser.total_size = -1;
goto out;
}
break;
/* start of a new record. */
case ':':
memset(&parser.line, 0, sizeof(HexLine));
in_process = true;
record_index = 0;
break;
/* decoding lines */
default:
if (!parse_record(&parser.line, &our_checksum, *hex_blob,
&record_index, in_process)) {
parser.total_size = -1;
goto out;
}
break;
}
}
out:
g_free(parser.bin_buf);
rom_transaction_end(parser.total_size != -1);
return parser.total_size;
}
| safe | 406 |
zzip_mem_entry_fopen(ZZIP_MEM_DISK * dir, ZZIP_MEM_ENTRY * entry)
{
/* keep this in sync with zzip_disk_entry_fopen */
ZZIP_DISK_FILE *file = malloc(sizeof(ZZIP_MEM_DISK_FILE));
if (! file)
return file;
file->buffer = dir->disk->buffer;
file->endbuf = dir->disk->endbuf;
file->avail = zzip_mem_entry_usize(entry);
if (! file->avail || zzip_mem_entry_data_stored(entry))
{ file->stored = zzip_mem_entry_to_data (entry); return file; }
file->stored = 0;
file->zlib.opaque = 0;
file->zlib.zalloc = Z_NULL;
file->zlib.zfree = Z_NULL;
file->zlib.avail_in = zzip_mem_entry_csize(entry);
file->zlib.next_in = zzip_mem_entry_to_data(entry);
debug2("compressed size %i", (int) file->zlib.avail_in);
if (file->zlib.next_in + file->zlib.avail_in >= file->endbuf)
goto error;
if (file->zlib.next_in < file->buffer)
goto error;
if (! zzip_mem_entry_data_deflated(entry) ||
inflateInit2(&file->zlib, -MAX_WBITS) != Z_OK)
{ free (file); return 0; }
return file;
error:
errno = EBADMSG;
return NULL;
}
| safe | 407 |
struct usb_host_interface *usb_find_alt_setting(
struct usb_host_config *config,
unsigned int iface_num,
unsigned int alt_num)
{
struct usb_interface_cache *intf_cache = NULL;
int i;
if (!config)
return NULL;
for (i = 0; i < config->desc.bNumInterfaces; i++) {
if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
== iface_num) {
intf_cache = config->intf_cache[i];
break;
}
}
if (!intf_cache)
return NULL;
for (i = 0; i < intf_cache->num_altsetting; i++)
if (intf_cache->altsetting[i].desc.bAlternateSetting == alt_num)
return &intf_cache->altsetting[i];
printk(KERN_DEBUG "Did not find alt setting %u for intf %u, "
"config %u\n", alt_num, iface_num,
config->desc.bConfigurationValue);
return NULL;
}
| safe | 408 |
aiff_get_chunk_data (SF_PRIVATE *psf, const SF_CHUNK_ITERATOR * iterator, SF_CHUNK_INFO * chunk_info)
{ sf_count_t pos ;
int indx ;
if ((indx = psf_find_read_chunk_iterator (&psf->rchunks, iterator)) < 0)
return SFE_UNKNOWN_CHUNK ;
if (chunk_info->data == NULL)
return SFE_BAD_CHUNK_DATA_PTR ;
chunk_info->id_size = psf->rchunks.chunks [indx].id_size ;
memcpy (chunk_info->id, psf->rchunks.chunks [indx].id, sizeof (chunk_info->id) / sizeof (*chunk_info->id)) ;
pos = psf_ftell (psf) ;
psf_fseek (psf, psf->rchunks.chunks [indx].offset, SEEK_SET) ;
psf_fread (chunk_info->data, SF_MIN (chunk_info->datalen, psf->rchunks.chunks [indx].len), 1, psf) ;
psf_fseek (psf, pos, SEEK_SET) ;
return SFE_NO_ERROR ;
} /* aiff_get_chunk_data */
| safe | 409 |
rsvg_new_filter_primitive_gaussian_blur (const char *element_name, RsvgNode *parent)
{
RsvgFilterPrimitiveGaussianBlur *filter;
filter = g_new0 (RsvgFilterPrimitiveGaussianBlur, 1);
filter->super.in = g_string_new ("none");
filter->super.result = g_string_new ("none");
filter->sdx = 0;
filter->sdy = 0;
filter->super.render = rsvg_filter_primitive_gaussian_blur_render;
return rsvg_rust_cnode_new (RSVG_NODE_TYPE_FILTER_PRIMITIVE_GAUSSIAN_BLUR,
parent,
rsvg_state_new (),
filter,
rsvg_filter_primitive_gaussian_blur_set_atts,
rsvg_filter_draw,
rsvg_filter_primitive_free);
}
| safe | 410 |
enc_locale_env(char *locale)
{
char *s = locale;
char *p;
int i;
char buf[50];
if (s == NULL || *s == NUL)
if ((s = getenv("LC_ALL")) == NULL || *s == NUL)
if ((s = getenv("LC_CTYPE")) == NULL || *s == NUL)
s = getenv("LANG");
if (s == NULL || *s == NUL)
return NULL;
// The most generic locale format is:
// language[_territory][.codeset][@modifier][+special][,[sponsor][_revision]]
// If there is a '.' remove the part before it.
// if there is something after the codeset, remove it.
// Make the name lowercase and replace '_' with '-'.
// Exception: "ja_JP.EUC" == "euc-jp", "zh_CN.EUC" = "euc-cn",
// "ko_KR.EUC" == "euc-kr"
if ((p = (char *)vim_strchr((char_u *)s, '.')) != NULL)
{
if (p > s + 2 && STRNICMP(p + 1, "EUC", 3) == 0
&& !isalnum((int)p[4]) && p[4] != '-' && p[-3] == '_')
{
// copy "XY.EUC" to "euc-XY" to buf[10]
STRCPY(buf + 10, "euc-");
buf[14] = p[-2];
buf[15] = p[-1];
buf[16] = 0;
s = buf + 10;
}
else
s = p + 1;
}
for (i = 0; i < (int)sizeof(buf) - 1 && s[i] != NUL; ++i)
{
if (s[i] == '_' || s[i] == '-')
buf[i] = '-';
else if (isalnum((int)s[i]))
buf[i] = TOLOWER_ASC(s[i]);
else
break;
}
buf[i] = NUL;
return enc_canonize((char_u *)buf);
}
| safe | 411 |
config_tinker(
config_tree *ptree
)
{
attr_val * tinker;
int item;
item = -1; /* quiet warning */
tinker = HEAD_PFIFO(ptree->tinker);
for (; tinker != NULL; tinker = tinker->link) {
switch (tinker->attr) {
default:
NTP_INSIST(0);
break;
case T_Allan:
item = LOOP_ALLAN;
break;
case T_Dispersion:
item = LOOP_PHI;
break;
case T_Freq:
item = LOOP_FREQ;
break;
case T_Huffpuff:
item = LOOP_HUFFPUFF;
break;
case T_Panic:
item = LOOP_PANIC;
break;
case T_Step:
item = LOOP_MAX;
break;
case T_Stepout:
item = LOOP_MINSTEP;
break;
}
loop_config(item, tinker->value.d);
}
}
| safe | 412 |
SPL_METHOD(FilesystemIterator, current)
{
spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_PATHNAME)) {
spl_filesystem_object_get_file_name(intern TSRMLS_CC);
RETURN_STRINGL(intern->file_name, intern->file_name_len, 1);
} else if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_FILEINFO)) {
spl_filesystem_object_get_file_name(intern TSRMLS_CC);
spl_filesystem_object_create_type(0, intern, SPL_FS_INFO, NULL, return_value TSRMLS_CC);
} else {
RETURN_ZVAL(getThis(), 1, 0);
/*RETURN_STRING(intern->u.dir.entry.d_name, 1);*/
}
}
| safe | 413 |
R_API DsoJsonObj *r_bin_java_get_interface_json_definitions(RBinJavaObj *bin) {
RList *the_list;
DsoJsonObj *json_list = dso_json_list_new ();
RListIter *iter = NULL;
char *new_str;
if (!bin || !(the_list = r_bin_java_get_interface_names (bin))) {
return json_list;
}
r_list_foreach (the_list, iter, new_str) {
char *tmp = new_str;
// eprintf ("Processing string: %s\n", new_str);
while (*tmp) {
if (*tmp == '/') {
*tmp = '.';
}
tmp++;
}
// eprintf ("adding string: %s\n", new_str);
dso_json_list_append_str (json_list, new_str);
}
r_list_free (the_list);
return json_list;
}
| safe | 414 |
QPDFOutlineObjectHelper::QPDFOutlineObjectHelper(
QPDFObjectHandle oh, QPDFOutlineDocumentHelper& dh, int depth) :
QPDFObjectHelper(oh),
m(new Members(dh))
{
if (depth > 50)
{
// Not exercised in test suite, but was tested manually by
// temporarily changing max depth to 1.
return;
}
if (QPDFOutlineDocumentHelper::Accessor::checkSeen(
this->m->dh, this->oh.getObjGen()))
{
QTC::TC("qpdf", "QPDFOutlineObjectHelper loop");
return;
}
QPDFObjectHandle cur = oh.getKey("/First");
while (! cur.isNull())
{
QPDFOutlineObjectHelper new_ooh(cur, dh, 1 + depth);
new_ooh.m->parent = new QPDFOutlineObjectHelper(*this);
this->m->kids.push_back(new_ooh);
cur = cur.getKey("/Next");
}
}
| safe | 415 |
managesieve_parser_read_literal_data(struct managesieve_parser *parser,
const unsigned char *data,
size_t data_size)
{
if (parser->literal_skip_crlf) {
/* skip \r\n or \n, anything else gives an error */
if (data_size == 0)
return FALSE;
if (*data == '\r') {
parser->line_size++;
data++; data_size--;
i_stream_skip(parser->input, 1);
if (data_size == 0)
return FALSE;
}
if (*data != '\n') {
parser->error = "Missing LF after literal size";
return FALSE;
}
parser->line_size++;
data++; data_size--;
i_stream_skip(parser->input, 1);
parser->literal_skip_crlf = FALSE;
i_assert(parser->cur_pos == 0);
}
if ((parser->flags & MANAGESIEVE_PARSE_FLAG_STRING_STREAM) == 0) {
/* now we just wait until we've read enough data */
if (data_size < parser->literal_size) {
return FALSE;
} else {
if ( !uni_utf8_data_is_valid
(data, (size_t)parser->literal_size) ) {
parser->error = "Invalid UTF-8 character in literal string.";
return FALSE;
}
managesieve_parser_save_arg(parser, data,
(size_t)parser->literal_size);
parser->cur_pos = (size_t)parser->literal_size;
return TRUE;
}
} else {
/* we don't read the data; we just create a stream for the literal */
parser->eol = TRUE;
parser->str_stream = i_stream_create_limit
(parser->input, parser->literal_size);
managesieve_parser_save_arg(parser, NULL, 0);
return TRUE;
}
}
| safe | 416 |
int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
char *look_ahead, int la_len)
{
struct sk_buff *skb;
PRINTK("entering mac_drv_rx_init(len=%d)\n", len);
// "Received" a SMT or NSA frame of the local SMT.
if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
PRINTK("fddi: Discard invalid local SMT frame\n");
PRINTK(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
len, la_len, (unsigned long) look_ahead);
return (0);
}
skb = alloc_skb(len + 3, GFP_ATOMIC);
if (!skb) {
PRINTK("fddi: Local SMT: skb memory exhausted.\n");
return (0);
}
skb_reserve(skb, 3);
skb_put(skb, len);
skb_copy_to_linear_data(skb, look_ahead, len);
// deliver frame to system
skb->protocol = fddi_type_trans(skb, smc->os.dev);
netif_rx(skb);
return (0);
} // mac_drv_rx_init
| safe | 417 |
int ring_buffer_print_page_header(struct trace_seq *s)
{
struct buffer_data_page field;
trace_seq_printf(s, "\tfield: u64 timestamp;\t"
"offset:0;\tsize:%u;\tsigned:%u;\n",
(unsigned int)sizeof(field.time_stamp),
(unsigned int)is_signed_type(u64));
trace_seq_printf(s, "\tfield: local_t commit;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), commit),
(unsigned int)sizeof(field.commit),
(unsigned int)is_signed_type(long));
trace_seq_printf(s, "\tfield: int overwrite;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), commit),
1,
(unsigned int)is_signed_type(long));
trace_seq_printf(s, "\tfield: char data;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), data),
(unsigned int)BUF_PAGE_SIZE,
(unsigned int)is_signed_type(char));
return !trace_seq_has_overflowed(s);
}
| safe | 418 |
SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
{
struct sctp_sock *sp;
SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk);
/* Release our hold on the endpoint. */
sp = sctp_sk(sk);
if (sp->do_auto_asconf) {
sp->do_auto_asconf = 0;
list_del(&sp->auto_asconf_list);
}
sctp_endpoint_free(sp->ep);
local_bh_disable();
percpu_counter_dec(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
}
| safe | 419 |
GF_Err mhac_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s;
e = gf_isom_box_write(s, bs);
if (e) return e;
gf_bs_write_u8(bs, ptr->configuration_version);
gf_bs_write_u8(bs, ptr->mha_pl_indication);
gf_bs_write_u8(bs, ptr->reference_channel_layout);
gf_bs_write_u16(bs, ptr->mha_config ? ptr->mha_config_size : 0);
if (ptr->mha_config && ptr->mha_config_size)
gf_bs_write_data(bs, ptr->mha_config, ptr->mha_config_size);
return GF_OK;
}
| safe | 420 |
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
struct tpacket_block_desc *pbd1,
struct packet_sock *po, unsigned int stat)
{
__u32 status = TP_STATUS_USER | stat;
struct tpacket3_hdr *last_pkt;
struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
struct sock *sk = &po->sk;
if (po->stats.stats3.tp_drops)
status |= TP_STATUS_LOSING;
last_pkt = (struct tpacket3_hdr *)pkc1->prev;
last_pkt->tp_next_offset = 0;
/* Get the ts of the last pkt */
if (BLOCK_NUM_PKTS(pbd1)) {
h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
} else {
/* Ok, we tmo'd - so get the current time.
*
* It shouldn't really happen as we don't close empty
* blocks. See prb_retire_rx_blk_timer_expired().
*/
struct timespec ts;
getnstimeofday(&ts);
h1->ts_last_pkt.ts_sec = ts.tv_sec;
h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
}
smp_wmb();
/* Flush the block */
prb_flush_block(pkc1, pbd1, status);
sk->sk_data_ready(sk);
pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}
| safe | 421 |
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
BatchToSpaceNDContext op_context(context, node);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) >= kInputMinDimensionNum);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) <= kInputMaxDimensionNum);
TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
if (!IsConstantTensor(op_context.block_shape) ||
!IsConstantTensor(op_context.crops)) {
SetTensorToDynamic(op_context.output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, &op_context);
}
| safe | 422 |
process_button(struct parsed_tag *tag)
{
Str tmp = NULL;
char *p, *q, *r, *qq = "";
int qlen, v;
if (cur_form_id < 0) {
char *s = "<form_int method=internal action=none>";
tmp = process_form(parse_tag(&s, TRUE));
}
if (tmp == NULL)
tmp = Strnew();
p = "submit";
parsedtag_get_value(tag, ATTR_TYPE, &p);
q = NULL;
parsedtag_get_value(tag, ATTR_VALUE, &q);
r = "";
parsedtag_get_value(tag, ATTR_NAME, &r);
v = formtype(p);
if (v == FORM_UNKNOWN)
return NULL;
switch (v) {
case FORM_INPUT_SUBMIT:
case FORM_INPUT_BUTTON:
case FORM_INPUT_RESET:
break;
default:
p = "submit";
v = FORM_INPUT_SUBMIT;
break;
}
if (!q) {
switch (v) {
case FORM_INPUT_SUBMIT:
case FORM_INPUT_BUTTON:
q = "SUBMIT";
break;
case FORM_INPUT_RESET:
q = "RESET";
break;
}
}
if (q) {
qq = html_quote(q);
qlen = strlen(q);
}
/* Strcat_charp(tmp, "<pre_int>"); */
Strcat(tmp, Sprintf("<input_alt hseq=\"%d\" fid=\"%d\" type=\"%s\" "
"name=\"%s\" value=\"%s\">",
cur_hseq++, cur_form_id, html_quote(p),
html_quote(r), qq));
return tmp;
}
| safe | 423 |
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
int max_irr, tpr;
if (!kvm_x86_ops.update_cr8_intercept)
return;
if (!lapic_in_kernel(vcpu))
return;
if (vcpu->arch.apicv_active)
return;
if (!vcpu->arch.apic->vapic_addr)
max_irr = kvm_lapic_find_highest_irr(vcpu);
else
max_irr = -1;
if (max_irr != -1)
max_irr >>= 4;
tpr = kvm_lapic_get_cr8(vcpu);
static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr);
}
| safe | 424 |
print_string (FILE *fp, const byte *p, size_t n, int delim)
{
for ( ; n; n--, p++ )
{
if (*p < 0x20 || (*p >= 0x7f && *p < 0xa0) || *p == delim)
{
putc('\\', fp);
if( *p == '\n' )
putc('n', fp);
else if( *p == '\r' )
putc('r', fp);
else if( *p == '\f' )
putc('f', fp);
else if( *p == '\v' )
putc('v', fp);
else if( *p == '\b' )
putc('b', fp);
else if( !*p )
putc('0', fp);
else
fprintf(fp, "x%02x", *p );
}
else
putc(*p, fp);
}
}
| safe | 425 |
static int smacker_decode_tree(BitstreamContext *bc, HuffContext *hc,
uint32_t prefix, int length)
{
if (length > SMKTREE_DECODE_MAX_RECURSION) {
av_log(NULL, AV_LOG_ERROR, "Maximum tree recursion level exceeded.\n");
return AVERROR_INVALIDDATA;
}
if (!bitstream_read_bit(bc)) { // Leaf
if(hc->current >= 256){
av_log(NULL, AV_LOG_ERROR, "Tree size exceeded!\n");
return AVERROR_INVALIDDATA;
}
if(length){
hc->bits[hc->current] = prefix;
hc->lengths[hc->current] = length;
} else {
hc->bits[hc->current] = 0;
hc->lengths[hc->current] = 0;
}
hc->values[hc->current] = bitstream_read(bc, 8);
hc->current++;
if(hc->maxlength < length)
hc->maxlength = length;
return 0;
} else { //Node
int r;
length++;
r = smacker_decode_tree(bc, hc, prefix, length);
if(r)
return r;
return smacker_decode_tree(bc, hc, prefix | (1 << (length - 1)), length);
}
}
| safe | 426 |
UpdateWaitHandles(LPHANDLE *handles_ptr, LPDWORD count,
HANDLE io_event, HANDLE exit_event, list_item_t *threads)
{
static DWORD size = 10;
static LPHANDLE handles = NULL;
DWORD pos = 0;
if (handles == NULL)
{
handles = malloc(size * sizeof(HANDLE));
*handles_ptr = handles;
if (handles == NULL)
{
return ERROR_OUTOFMEMORY;
}
}
handles[pos++] = io_event;
if (!threads)
{
handles[pos++] = exit_event;
}
while (threads)
{
if (pos == size)
{
LPHANDLE tmp;
size += 10;
tmp = realloc(handles, size * sizeof(HANDLE));
if (tmp == NULL)
{
size -= 10;
*count = pos;
return ERROR_OUTOFMEMORY;
}
handles = tmp;
*handles_ptr = handles;
}
handles[pos++] = threads->data;
threads = threads->next;
}
*count = pos;
return NO_ERROR;
}
| safe | 427 |
gst_qtdemux_check_seekability (GstQTDemux * demux)
{
GstQuery *query;
gboolean seekable = FALSE;
gint64 start = -1, stop = -1;
if (demux->upstream_size)
return;
query = gst_query_new_seeking (GST_FORMAT_BYTES);
if (!gst_pad_peer_query (demux->sinkpad, query)) {
GST_DEBUG_OBJECT (demux, "seeking query failed");
goto done;
}
gst_query_parse_seeking (query, NULL, &seekable, &start, &stop);
/* try harder to query upstream size if we didn't get it the first time */
if (seekable && stop == -1) {
GST_DEBUG_OBJECT (demux, "doing duration query to fix up unset stop");
gst_pad_peer_query_duration (demux->sinkpad, GST_FORMAT_BYTES, &stop);
}
/* if upstream doesn't know the size, it's likely that it's not seekable in
* practice even if it technically may be seekable */
if (seekable && (start != 0 || stop <= start)) {
GST_DEBUG_OBJECT (demux, "seekable but unknown start/stop -> disable");
seekable = FALSE;
}
done:
gst_query_unref (query);
GST_DEBUG_OBJECT (demux, "seekable: %d (%" G_GUINT64_FORMAT " - %"
G_GUINT64_FORMAT ")", seekable, start, stop);
demux->upstream_seekable = seekable;
demux->upstream_size = seekable ? stop : -1;
}
| safe | 428 |
static TEE_Result alloc_pgt(struct user_ta_ctx *utc)
{
struct thread_specific_data *tsd __maybe_unused;
vaddr_t b;
vaddr_t e;
size_t ntbl;
ntbl = get_num_req_pgts(utc, &b, &e);
if (!pgt_check_avail(ntbl)) {
EMSG("%zu page tables not available", ntbl);
return TEE_ERROR_OUT_OF_MEMORY;
}
#ifdef CFG_PAGED_USER_TA
tsd = thread_get_tsd();
if (&utc->ctx == tsd->ctx) {
/*
* The supplied utc is the current active utc, allocate the
* page tables too as the pager needs to use them soon.
*/
pgt_alloc(&tsd->pgt_cache, &utc->ctx, b, e - 1);
}
#endif
return TEE_SUCCESS;
}
| safe | 429 |
static int nfs_readdir_search_for_cookie(struct nfs_cache_array *array,
struct nfs_readdir_descriptor *desc)
{
int i;
loff_t new_pos;
int status = -EAGAIN;
if (!nfs_readdir_array_cookie_in_range(array, desc->dir_cookie))
goto check_eof;
for (i = 0; i < array->size; i++) {
if (array->array[i].cookie == desc->dir_cookie) {
struct nfs_inode *nfsi = NFS_I(file_inode(desc->file));
new_pos = desc->current_index + i;
if (desc->attr_gencount != nfsi->attr_gencount ||
!nfs_readdir_inode_mapping_valid(nfsi)) {
desc->duped = 0;
desc->attr_gencount = nfsi->attr_gencount;
} else if (new_pos < desc->prev_index) {
if (desc->duped > 0
&& desc->dup_cookie == desc->dir_cookie) {
if (printk_ratelimit()) {
pr_notice("NFS: directory %pD2 contains a readdir loop."
"Please contact your server vendor. "
"The file: %s has duplicate cookie %llu\n",
desc->file, array->array[i].name, desc->dir_cookie);
}
status = -ELOOP;
goto out;
}
desc->dup_cookie = desc->dir_cookie;
desc->duped = -1;
}
if (nfs_readdir_use_cookie(desc->file))
desc->ctx->pos = desc->dir_cookie;
else
desc->ctx->pos = new_pos;
desc->prev_index = new_pos;
desc->cache_entry_index = i;
return 0;
}
}
check_eof:
if (array->page_is_eof) {
status = -EBADCOOKIE;
if (desc->dir_cookie == array->last_cookie)
desc->eof = true;
}
out:
return status;
}
| safe | 430 |
write_reg_contents_lst(
int name,
char_u **strings,
int maxlen UNUSED,
int must_append,
int yank_type,
long block_len)
{
yankreg_T *old_y_previous, *old_y_current;
if (name == '/' || name == '=')
{
char_u *s;
if (strings[0] == NULL)
s = (char_u *)"";
else if (strings[1] != NULL)
{
emsg(_(e_search_pattern_and_expression_register_may_not_contain_two_or_more_lines));
return;
}
else
s = strings[0];
write_reg_contents_ex(name, s, -1, must_append, yank_type, block_len);
return;
}
if (name == '_') // black hole: nothing to do
return;
if (init_write_reg(name, &old_y_previous, &old_y_current, must_append,
&yank_type) == FAIL)
return;
str_to_reg(y_current, yank_type, (char_u *)strings, -1, block_len, TRUE);
finish_write_reg(name, old_y_previous, old_y_current);
}
| safe | 431 |
ialloc_alloc_state(gs_memory_t * parent, uint clump_size)
{
clump_t *cp;
gs_ref_memory_t *iimem = ialloc_solo(parent, &st_ref_memory, &cp);
if (iimem == 0)
return 0;
iimem->stable_memory = (gs_memory_t *)iimem;
iimem->procs = gs_ref_memory_procs;
iimem->gs_lib_ctx = parent->gs_lib_ctx;
iimem->non_gc_memory = parent;
iimem->thread_safe_memory = parent->thread_safe_memory;
iimem->clump_size = clump_size;
#ifdef MEMENTO
iimem->large_size = 1;
#else
iimem->large_size = ((clump_size / 4) & -obj_align_mod) + 1;
#endif
iimem->is_controlled = false;
iimem->gc_status.vm_threshold = clump_size * 3L;
iimem->gc_status.max_vm = max_long;
iimem->gc_status.signal_value = 0;
iimem->gc_status.enabled = false;
iimem->gc_status.requested = 0;
iimem->gc_allocated = 0;
iimem->previous_status.allocated = 0;
iimem->previous_status.used = 0;
ialloc_reset(iimem);
iimem->root = cp;
ialloc_set_limit(iimem);
iimem->cc = NULL;
iimem->save_level = 0;
iimem->new_mask = 0;
iimem->test_mask = ~0;
iimem->streams = 0;
iimem->names_array = 0;
iimem->roots = 0;
iimem->num_contexts = 0;
iimem->saved = 0;
return iimem;
}
| safe | 432 |
static void slcan_transmit(struct work_struct *work)
{
struct slcan *sl = container_of(work, struct slcan, tx_work);
int actual;
spin_lock_bh(&sl->lock);
/* First make sure we're connected. */
if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
spin_unlock_bh(&sl->lock);
return;
}
if (sl->xleft <= 0) {
/* Now serial buffer is almost free & we can start
* transmission of another packet */
sl->dev->stats.tx_packets++;
clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
spin_unlock_bh(&sl->lock);
netif_wake_queue(sl->dev);
return;
}
actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
sl->xleft -= actual;
sl->xhead += actual;
spin_unlock_bh(&sl->lock);
}
| safe | 433 |
QPDFWriter::pushEncryptionFilter()
{
if (this->encrypted && (! this->cur_data_key.empty()))
{
Pipeline* p = 0;
if (this->encrypt_use_aes)
{
p = new Pl_AES_PDF(
"aes stream encryption", this->pipeline, true,
QUtil::unsigned_char_pointer(this->cur_data_key),
this->cur_data_key.length());
}
else
{
p = new Pl_RC4("rc4 stream encryption", this->pipeline,
QUtil::unsigned_char_pointer(this->cur_data_key),
this->cur_data_key.length());
}
pushPipeline(p);
}
// Must call this unconditionally so we can call popPipelineStack
// to balance pushEncryptionFilter().
activatePipelineStack();
}
| safe | 434 |
function_descriptors(struct usb_function *f,
enum usb_device_speed speed)
{
struct usb_descriptor_header **descriptors;
/*
* NOTE: we try to help gadget drivers which might not be setting
* max_speed appropriately.
*/
switch (speed) {
case USB_SPEED_SUPER_PLUS:
descriptors = f->ssp_descriptors;
if (descriptors)
break;
fallthrough;
case USB_SPEED_SUPER:
descriptors = f->ss_descriptors;
if (descriptors)
break;
fallthrough;
case USB_SPEED_HIGH:
descriptors = f->hs_descriptors;
if (descriptors)
break;
fallthrough;
default:
descriptors = f->fs_descriptors;
}
/*
* if we can't find any descriptors at all, then this gadget deserves to
* Oops with a NULL pointer dereference
*/
return descriptors;
}
| safe | 435 |
static void StorePicturePTS( encoder_t *p_enc, uint32_t u_pnum, mtime_t i_pts )
{
encoder_sys_t *p_sys = p_enc->p_sys;
for( int i = 0; i<SCHRO_PTS_TLB_SIZE; i++ )
{
if( p_sys->pts_tlb[i].b_empty )
{
p_sys->pts_tlb[i].u_pnum = u_pnum;
p_sys->pts_tlb[i].i_pts = i_pts;
p_sys->pts_tlb[i].b_empty = false;
return;
}
}
msg_Err( p_enc, "Could not store PTS %"PRId64" for frame %u", i_pts, u_pnum );
}
| safe | 436 |
static int ssl_parse_renegotiation_info( mbedtls_ssl_context *ssl,
const unsigned char *buf,
size_t len )
{
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
{
/* Check verify-data in constant-time. The length OTOH is no secret */
if( len != 1 + ssl->verify_data_len ||
buf[0] != ssl->verify_data_len ||
mbedtls_ssl_safer_memcmp( buf + 1, ssl->peer_verify_data,
ssl->verify_data_len ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "non-matching renegotiation info" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
}
else
#endif /* MBEDTLS_SSL_RENEGOTIATION */
{
if( len != 1 || buf[0] != 0x0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "non-zero length renegotiation info" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ssl->secure_renegotiation = MBEDTLS_SSL_SECURE_RENEGOTIATION;
}
return( 0 );
}
| safe | 437 |
tsize_t t2p_write_pdf_string(const char* pdfstr, TIFF* output)
{
tsize_t written = 0;
uint32 i = 0;
char buffer[64];
size_t len = 0;
len = strlen(pdfstr);
written += t2pWriteFile(output, (tdata_t) "(", 1);
for (i=0; i<len; i++) {
if((pdfstr[i]&0x80) || (pdfstr[i]==127) || (pdfstr[i]<32)){
snprintf(buffer, sizeof(buffer), "\\%.3o", ((unsigned char)pdfstr[i]));
written += t2pWriteFile(output, (tdata_t)buffer, 4);
} else {
switch (pdfstr[i]){
case 0x08:
written += t2pWriteFile(output, (tdata_t) "\\b", 2);
break;
case 0x09:
written += t2pWriteFile(output, (tdata_t) "\\t", 2);
break;
case 0x0A:
written += t2pWriteFile(output, (tdata_t) "\\n", 2);
break;
case 0x0C:
written += t2pWriteFile(output, (tdata_t) "\\f", 2);
break;
case 0x0D:
written += t2pWriteFile(output, (tdata_t) "\\r", 2);
break;
case 0x28:
written += t2pWriteFile(output, (tdata_t) "\\(", 2);
break;
case 0x29:
written += t2pWriteFile(output, (tdata_t) "\\)", 2);
break;
case 0x5C:
written += t2pWriteFile(output, (tdata_t) "\\\\", 2);
break;
default:
written += t2pWriteFile(output, (tdata_t) &pdfstr[i], 1);
}
}
}
written += t2pWriteFile(output, (tdata_t) ") ", 1);
return(written);
}
| safe | 438 |
nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
struct nfsd4_getdeviceinfo *gdev)
{
DECODE_HEAD;
u32 num, i;
READ_BUF(sizeof(struct nfsd4_deviceid) + 3 * 4);
COPYMEM(&gdev->gd_devid, sizeof(struct nfsd4_deviceid));
gdev->gd_layout_type = be32_to_cpup(p++);
gdev->gd_maxcount = be32_to_cpup(p++);
num = be32_to_cpup(p++);
if (num) {
READ_BUF(4 * num);
gdev->gd_notify_types = be32_to_cpup(p++);
for (i = 1; i < num; i++) {
if (be32_to_cpup(p++)) {
status = nfserr_inval;
goto out;
}
}
}
DECODE_TAIL;
}
| safe | 439 |
static void ndisc_redirect_rcv(struct sk_buff *skb)
{
u8 *hdr;
struct ndisc_options ndopts;
struct rd_msg *msg = (struct rd_msg *)skb_transport_header(skb);
u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
offsetof(struct rd_msg, opt));
#ifdef CONFIG_IPV6_NDISC_NODETYPE
switch (skb->ndisc_nodetype) {
case NDISC_NODETYPE_HOST:
case NDISC_NODETYPE_NODEFAULT:
ND_PRINTK(2, warn,
"Redirect: from host or unauthorized router\n");
return;
}
#endif
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
ND_PRINTK(2, warn,
"Redirect: source address is not link-local\n");
return;
}
if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
return;
if (!ndopts.nd_opts_rh) {
ip6_redirect_no_header(skb, dev_net(skb->dev),
skb->dev->ifindex, 0);
return;
}
hdr = (u8 *)ndopts.nd_opts_rh;
hdr += 8;
if (!pskb_pull(skb, hdr - skb_transport_header(skb)))
return;
icmpv6_notify(skb, NDISC_REDIRECT, 0, 0);
}
| safe | 440 |
static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
const char **name,
void **value, size_t *len)
{
const struct task_security_struct *tsec = selinux_cred(current_cred());
struct superblock_security_struct *sbsec;
u32 newsid, clen;
int rc;
char *context;
sbsec = selinux_superblock(dir->i_sb);
newsid = tsec->create_sid;
rc = selinux_determine_inode_label(tsec, dir, qstr,
inode_mode_to_security_class(inode->i_mode),
&newsid);
if (rc)
return rc;
/* Possibly defer initialization to selinux_complete_init. */
if (sbsec->flags & SE_SBINITIALIZED) {
struct inode_security_struct *isec = selinux_inode(inode);
isec->sclass = inode_mode_to_security_class(inode->i_mode);
isec->sid = newsid;
isec->initialized = LABEL_INITIALIZED;
}
if (!selinux_initialized(&selinux_state) ||
!(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
if (name)
*name = XATTR_SELINUX_SUFFIX;
if (value && len) {
rc = security_sid_to_context_force(&selinux_state, newsid,
&context, &clen);
if (rc)
return rc;
*value = context;
*len = clen;
}
return 0;
}
| safe | 441 |
bool DecimalQuantity::operator==(const DecimalQuantity& other) const {
bool basicEquals =
scale == other.scale
&& precision == other.precision
&& flags == other.flags
&& lOptPos == other.lOptPos
&& lReqPos == other.lReqPos
&& rReqPos == other.rReqPos
&& rOptPos == other.rOptPos
&& isApproximate == other.isApproximate;
if (!basicEquals) {
return false;
}
if (precision == 0) {
return true;
} else if (isApproximate) {
return origDouble == other.origDouble && origDelta == other.origDelta;
} else {
for (int m = getUpperDisplayMagnitude(); m >= getLowerDisplayMagnitude(); m--) {
if (getDigit(m) != other.getDigit(m)) {
return false;
}
}
return true;
}
}
| safe | 442 |
httpSetAuthString(http_t *http, /* I - HTTP connection */
const char *scheme, /* I - Auth scheme (NULL to clear it) */
const char *data) /* I - Auth data (NULL for none) */
{
/*
* Range check input...
*/
if (!http)
return;
if (http->authstring && http->authstring != http->_authstring)
free(http->authstring);
http->authstring = http->_authstring;
if (scheme)
{
/*
* Set the current authorization string...
*/
size_t len = strlen(scheme) + (data ? strlen(data) + 1 : 0) + 1;
char *temp;
if (len > sizeof(http->_authstring))
{
if ((temp = malloc(len)) == NULL)
len = sizeof(http->_authstring);
else
http->authstring = temp;
}
if (data)
snprintf(http->authstring, len, "%s %s", scheme, data);
else
strlcpy(http->authstring, scheme, len);
}
else
{
/*
* Clear the current authorization string...
*/
http->_authstring[0] = '\0';
}
}
| safe | 443 |
PHP_FUNCTION(html_entity_decode)
{
char *str, *hint_charset = NULL;
int str_len, hint_charset_len = 0;
size_t new_len = 0;
long quote_style = ENT_COMPAT;
char *replaced;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|ls", &str, &str_len,
&quote_style, &hint_charset, &hint_charset_len) == FAILURE) {
return;
}
replaced = php_unescape_html_entities(str, str_len, &new_len, 1 /*all*/, quote_style, hint_charset TSRMLS_CC);
if (replaced) {
RETURN_STRINGL(replaced, (int)new_len, 0);
}
RETURN_FALSE;
}
| safe | 444 |
scanner_detect_eval_call (parser_context_t *context_p, /**< context */
scanner_context_t *scanner_context_p) /**< scanner context */
{
if (context_p->token.keyword_type == LEXER_KEYW_EVAL
&& lexer_check_next_character (context_p, LIT_CHAR_LEFT_PAREN))
{
#if JERRY_ESNEXT
const uint16_t flags = (uint16_t) (SCANNER_LITERAL_POOL_CAN_EVAL | SCANNER_LITERAL_POOL_HAS_SUPER_REFERENCE);
#else /* !JERRY_ESNEXT */
const uint16_t flags = SCANNER_LITERAL_POOL_CAN_EVAL;
#endif /* JERRY_ESNEXT */
scanner_context_p->active_literal_pool_p->status_flags |= flags;
}
} /* scanner_detect_eval_call */
| safe | 445 |
pci_populate_msicap(struct msicap *msicap, int msgnum, int nextptr)
{
int mmc;
/* Number of msi messages must be a power of 2 between 1 and 32 */
if (((msgnum & (msgnum - 1)) != 0) || msgnum < 1 || msgnum > 32) {
pr_err("%s: invalid number of msi messages!\n", __func__);
return -1;
}
mmc = ffs(msgnum) - 1;
bzero(msicap, sizeof(struct msicap));
msicap->capid = PCIY_MSI;
msicap->nextptr = nextptr;
msicap->msgctrl = PCIM_MSICTRL_64BIT | (mmc << 1);
return 0;
}
| safe | 446 |
bool Lex_input_stream::consume_comment(int remaining_recursions_permitted)
{
// only one level of nested comments are allowed
DBUG_ASSERT(remaining_recursions_permitted == 0 ||
remaining_recursions_permitted == 1);
uchar c;
while (!eof())
{
c= yyGet();
if (remaining_recursions_permitted == 1)
{
if ((c == '/') && (yyPeek() == '*'))
{
yyUnput('('); // Replace nested "/*..." with "(*..."
yySkip(); // and skip "("
yySkip(); /* Eat asterisk */
if (consume_comment(0))
return true;
yyUnput(')'); // Replace "...*/" with "...*)"
yySkip(); // and skip ")"
continue;
}
}
if (c == '*')
{
if (yyPeek() == '/')
{
yySkip(); // Eat slash
return FALSE;
}
}
if (c == '\n')
yylineno++;
}
return TRUE;
}
| safe | 447 |
formatfloat(PyObject *v, int flags, int prec, int type)
{
char *p;
PyObject *result;
double x;
x = PyFloat_AsDouble(v);
if (x == -1.0 && PyErr_Occurred()) {
PyErr_Format(PyExc_TypeError, "float argument required, "
"not %.200s", Py_TYPE(v)->tp_name);
return NULL;
}
if (prec < 0)
prec = 6;
p = PyOS_double_to_string(x, type, prec,
(flags & F_ALT) ? Py_DTSF_ALT : 0, NULL);
if (p == NULL)
return NULL;
result = PyString_FromStringAndSize(p, strlen(p));
PyMem_Free(p);
return result;
}
| safe | 448 |
bool checkAuthorizationImplPreParse(OperationContext* opCtx,
const Command* command,
const OpMsgRequest& request) {
auto client = opCtx->getClient();
if (client->isInDirectClient())
return true;
uassert(ErrorCodes::Unauthorized,
str::stream() << command->getName() << " may only be run against the admin database.",
!command->adminOnly() || request.getDatabase() == NamespaceString::kAdminDb);
auto authzSession = AuthorizationSession::get(client);
if (!authzSession->getAuthorizationManager().isAuthEnabled()) {
// Running without auth, so everything should be allowed except remotely invoked
// commands that have the 'localHostOnlyIfNoAuth' restriction.
uassert(ErrorCodes::Unauthorized,
str::stream() << command->getName()
<< " must run from localhost when running db without auth",
!command->adminOnly() || !command->localHostOnlyIfNoAuth() ||
client->getIsLocalHostConnection());
return true; // Blanket authorization: don't need to check anything else.
}
if (authzSession->isUsingLocalhostBypass())
return false; // Still can't decide on auth because of the localhost bypass.
uassert(ErrorCodes::Unauthorized,
str::stream() << "command " << command->getName() << " requires authentication",
!command->requiresAuth() || authzSession->isAuthenticated());
return false;
}
| safe | 449 |
void __detach_mounts(struct dentry *dentry)
{
struct mountpoint *mp;
struct mount *mnt;
namespace_lock();
mp = lookup_mountpoint(dentry);
if (IS_ERR_OR_NULL(mp))
goto out_unlock;
lock_mount_hash();
event++;
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
umount_mnt(mnt);
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
unlock_mount_hash();
put_mountpoint(mp);
out_unlock:
namespace_unlock();
}
| safe | 450 |
static void write_volume_label(DOS_FS * fs, char *label)
{
time_t now = time(NULL);
struct tm *mtime = localtime(&now);
off_t offset;
int created;
DIR_ENT de;
created = 0;
offset = find_volume_de(fs, &de);
if (offset == 0) {
created = 1;
offset = alloc_rootdir_entry(fs, &de, label);
}
memcpy(de.name, label, 11);
de.time = htole16((unsigned short)((mtime->tm_sec >> 1) +
(mtime->tm_min << 5) +
(mtime->tm_hour << 11)));
de.date = htole16((unsigned short)(mtime->tm_mday +
((mtime->tm_mon + 1) << 5) +
((mtime->tm_year - 80) << 9)));
if (created) {
de.attr = ATTR_VOLUME;
de.ctime_ms = 0;
de.ctime = de.time;
de.cdate = de.date;
de.adate = de.date;
de.starthi = 0;
de.start = 0;
de.size = 0;
}
fs_write(offset, sizeof(DIR_ENT), &de);
}
| safe | 451 |
static int ext4_link(struct dentry *old_dentry,
struct inode *dir, struct dentry *dentry)
{
handle_t *handle;
struct inode *inode = old_dentry->d_inode;
int err, retries = 0;
if (EXT4_DIR_LINK_MAX(inode))
return -EMLINK;
/*
* Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing
* otherwise has the potential to corrupt the orphan inode list.
*/
if (inode->i_nlink == 0)
return -ENOENT;
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS);
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
inode->i_ctime = ext4_current_time(inode);
ext4_inc_count(handle, inode);
atomic_inc(&inode->i_count);
err = ext4_add_entry(handle, dentry, inode);
if (!err) {
ext4_mark_inode_dirty(handle, inode);
d_instantiate(dentry, inode);
} else {
drop_nlink(inode);
iput(inode);
}
ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
}
| safe | 452 |
skip_sfx(struct archive_read *a)
{
const void *h;
const char *p, *q;
size_t skip, total;
ssize_t bytes, window;
total = 0;
window = 4096;
while (total + window <= (1024 * 128)) {
h = __archive_read_ahead(a, window, &bytes);
if (h == NULL) {
/* Remaining bytes are less than window. */
window >>= 1;
if (window < 0x40)
goto fatal;
continue;
}
if (bytes < 0x40)
goto fatal;
p = h;
q = p + bytes;
/*
* Scan ahead until we find something that looks
* like the RAR header.
*/
while (p + 7 < q) {
if (memcmp(p, RAR_SIGNATURE, 7) == 0) {
skip = p - (const char *)h;
__archive_read_consume(a, skip);
return (ARCHIVE_OK);
}
p += 0x10;
}
skip = p - (const char *)h;
__archive_read_consume(a, skip);
total += skip;
}
fatal:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Couldn't find out RAR header");
return (ARCHIVE_FATAL);
}
| safe | 453 |
PJ_DEF(pj_status_t) pj_stun_uint64_attr_create(pj_pool_t *pool,
int attr_type,
const pj_timestamp *value,
pj_stun_uint64_attr **p_attr)
{
pj_stun_uint64_attr *attr;
PJ_ASSERT_RETURN(pool && p_attr, PJ_EINVAL);
attr = PJ_POOL_ZALLOC_T(pool, pj_stun_uint64_attr);
INIT_ATTR(attr, attr_type, 8);
if (value) {
attr->value.u32.hi = value->u32.hi;
attr->value.u32.lo = value->u32.lo;
}
*p_attr = attr;
return PJ_SUCCESS;
}
| safe | 454 |
int dissolve_free_huge_page(struct page *page)
{
int rc = -EBUSY;
/* Not to disrupt normal path by vainly holding hugetlb_lock */
if (!PageHuge(page))
return 0;
spin_lock(&hugetlb_lock);
if (!PageHuge(page)) {
rc = 0;
goto out;
}
if (!page_count(page)) {
struct page *head = compound_head(page);
struct hstate *h = page_hstate(head);
int nid = page_to_nid(head);
if (h->free_huge_pages - h->resv_huge_pages == 0)
goto out;
/*
* Move PageHWPoison flag from head page to the raw error page,
* which makes any subpages rather than the error page reusable.
*/
if (PageHWPoison(head) && page != head) {
SetPageHWPoison(page);
ClearPageHWPoison(head);
}
list_del(&head->lru);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
h->max_huge_pages--;
update_and_free_page(h, head);
rc = 0;
}
out:
spin_unlock(&hugetlb_lock);
return rc;
}
| safe | 455 |
void btrfs_update_commit_device_bytes_used(struct btrfs_transaction *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct extent_map *em;
struct map_lookup *map;
struct btrfs_device *dev;
int i;
if (list_empty(&trans->pending_chunks))
return;
/* In order to kick the device replace finish process */
mutex_lock(&fs_info->chunk_mutex);
list_for_each_entry(em, &trans->pending_chunks, list) {
map = em->map_lookup;
for (i = 0; i < map->num_stripes; i++) {
dev = map->stripes[i].dev;
dev->commit_bytes_used = dev->bytes_used;
}
}
mutex_unlock(&fs_info->chunk_mutex);
}
| safe | 456 |
int con_debug_enter(struct vc_data *vc)
{
int ret = 0;
saved_fg_console = fg_console;
saved_last_console = last_console;
saved_want_console = want_console;
saved_vc_mode = vc->vc_mode;
saved_console_blanked = console_blanked;
vc->vc_mode = KD_TEXT;
console_blanked = 0;
if (vc->vc_sw->con_debug_enter)
ret = vc->vc_sw->con_debug_enter(vc);
#ifdef CONFIG_KGDB_KDB
/* Set the initial LINES variable if it is not already set */
if (vc->vc_rows < 999) {
int linecount;
char lns[4];
const char *setargs[3] = {
"set",
"LINES",
lns,
};
if (kdbgetintenv(setargs[0], &linecount)) {
snprintf(lns, 4, "%i", vc->vc_rows);
kdb_set(2, setargs);
}
}
if (vc->vc_cols < 999) {
int colcount;
char cols[4];
const char *setargs[3] = {
"set",
"COLUMNS",
cols,
};
if (kdbgetintenv(setargs[0], &colcount)) {
snprintf(cols, 4, "%i", vc->vc_cols);
kdb_set(2, setargs);
}
}
#endif /* CONFIG_KGDB_KDB */
return ret;
}
| safe | 457 |
TEST_F(QueryPlannerTest, ElemMatchEmbeddedAnd) {
// true means multikey
addIndex(BSON("a.b" << 1 << "a.c" << 1), true);
runQuery(fromjson("{a: {$elemMatch: {b: {$gte: 2, $lt: 4}, c: 25}}}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists(
"{fetch: {filter: {a:{$elemMatch:{b:{$gte:2,$lt: 4},c:25}}}, node: "
"{ixscan: {filter: null, pattern: {'a.b': 1, 'a.c': 1}, "
"bounds: {'a.b': [[-Infinity,4,true,false]], "
"'a.c': [[25,25,true,true]]}}}}}");
assertSolutionExists(
"{fetch: {filter: {a:{$elemMatch:{b:{$gte:2,$lt: 4},c:25}}}, node: "
"{ixscan: {filter: null, pattern: {'a.b': 1, 'a.c': 1}, "
"bounds: {'a.b': [[2,Infinity,true,true]], "
"'a.c': [[25,25,true,true]]}}}}}");
}
| safe | 458 |
int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
struct tpm_cmd_t cmd;
int rc;
u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
if (chip == NULL)
return -ENODEV;
/* for buggy tpm, flush pcrs with extend to selected dummy */
if (tpm_suspend_pcr) {
cmd.header.in = pcrextend_header;
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
TPM_DIGEST_SIZE);
rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
"extending dummy pcr before suspend");
}
/* now do the actual savestate */
cmd.header.in = savestate_header;
rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
"sending savestate before suspend");
return rc;
}
| safe | 459 |
errno_t sssctl_logs_fetch(struct sss_cmdline *cmdline,
struct sss_tool_ctx *tool_ctx,
void *pvt)
{
const char *file;
errno_t ret;
glob_t globbuf;
/* Parse command line. */
ret = sss_tool_popt_ex(cmdline, NULL, SSS_TOOL_OPT_OPTIONAL, NULL, NULL,
"FILE", "Output file", &file, NULL);
if (ret != EOK) {
DEBUG(SSSDBG_CRIT_FAILURE, "Unable to parse command arguments\n");
return ret;
}
globbuf.gl_offs = 3;
ret = glob(LOG_PATH"/*.log", GLOB_ERR|GLOB_DOOFFS, NULL, &globbuf);
if (ret != 0) {
DEBUG(SSSDBG_CRIT_FAILURE, "Unable to expand log files list\n");
return ret;
}
globbuf.gl_pathv[0] = discard_const_p(char, "tar");
globbuf.gl_pathv[1] = discard_const_p(char, "-czf");
globbuf.gl_pathv[2] = discard_const_p(char, file);
PRINT("Archiving log files into %s...\n", file);
ret = sssctl_run_command((const char * const*)globbuf.gl_pathv);
globfree(&globbuf);
if (ret != EOK) {
ERROR("Unable to archive log files\n");
return ret;
}
return EOK;
}
| safe | 460 |
options_from_list(w, priv)
struct wordlist *w;
int priv;
{
char *argv[MAXARGS];
option_t *opt;
int i, n, ret = 0;
struct wordlist *w0;
privileged_option = priv;
option_source = "secrets file";
option_priority = OPRIO_SECFILE;
while (w != NULL) {
opt = find_option(w->word);
if (opt == NULL) {
option_error("In secrets file: unrecognized option '%s'",
w->word);
goto err;
}
n = n_arguments(opt);
w0 = w;
for (i = 0; i < n; ++i) {
w = w->next;
if (w == NULL) {
option_error(
"In secrets file: too few parameters for option '%s'",
w0->word);
goto err;
}
argv[i] = w->word;
}
if (!process_option(opt, w0->word, argv))
goto err;
w = w->next;
}
ret = 1;
err:
return ret;
}
| safe | 461 |
RAMBlock *qemu_ram_block_from_host(struct uc_struct *uc, void *ptr,
bool round_offset, ram_addr_t *offset)
{
RAMBlock *block;
uint8_t *host = ptr;
block = uc->ram_list.mru_block;
if (block && block->host && host - block->host < block->max_length) {
goto found;
}
RAMBLOCK_FOREACH(block) {
/* This case append when the block is not mapped. */
if (block->host == NULL) {
continue;
}
if (host - block->host < block->max_length) {
goto found;
}
}
return NULL;
found:
*offset = (host - block->host);
if (round_offset) {
*offset &= TARGET_PAGE_MASK;
}
return block;
}
| safe | 462 |
void mysql_detach_stmt_list(LIST **stmt_list __attribute__((unused)),
const char *func_name __attribute__((unused)))
{
#ifdef MYSQL_CLIENT
/* Reset connection handle in all prepared statements. */
LIST *element= *stmt_list;
char buff[MYSQL_ERRMSG_SIZE];
DBUG_ENTER("mysql_detach_stmt_list");
my_snprintf(buff, sizeof(buff)-1, ER(CR_STMT_CLOSED), func_name);
for (; element; element= element->next)
{
MYSQL_STMT *stmt= (MYSQL_STMT *) element->data;
set_stmt_error(stmt, CR_STMT_CLOSED, unknown_sqlstate, buff);
stmt->mysql= 0;
/* No need to call list_delete for statement here */
}
*stmt_list= 0;
DBUG_VOID_RETURN;
#endif /* MYSQL_CLIENT */
}
| safe | 463 |
void OSD::handle_command(MCommand *m)
{
ConnectionRef con = m->get_connection();
Session *session = static_cast<Session *>(con->get_priv());
if (!session) {
con->send_message(new MCommandReply(m, -EPERM));
m->put();
return;
}
OSDCap& caps = session->caps;
session->put();
if (!caps.allow_all() || m->get_source().is_mon()) {
con->send_message(new MCommandReply(m, -EPERM));
m->put();
return;
}
Command *c = new Command(m->cmd, m->get_tid(), m->get_data(), con.get());
command_wq.queue(c);
m->put();
}
| safe | 464 |
flatpak_dir_create_system_child_oci_registry (FlatpakDir *self,
GLnxLockFile *file_lock,
GError **error)
{
g_autoptr(GFile) cache_dir = NULL;
g_autoptr(GFile) repo_dir = NULL;
g_autofree char *repo_url = NULL;
g_autofree char *tmpdir_name = NULL;
g_autoptr(FlatpakOciRegistry) new_registry = NULL;
g_assert (!self->user);
if (!flatpak_dir_ensure_repo (self, NULL, error))
return NULL;
cache_dir = flatpak_ensure_system_user_cache_dir_location (error);
if (cache_dir == NULL)
return NULL;
if (!flatpak_allocate_tmpdir (AT_FDCWD,
flatpak_file_get_path_cached (cache_dir),
"child-oci-", &tmpdir_name,
NULL,
file_lock,
NULL,
NULL, error))
return NULL;
repo_dir = g_file_get_child (cache_dir, tmpdir_name);
repo_url = g_file_get_uri (repo_dir);
new_registry = flatpak_oci_registry_new (repo_url, TRUE, -1,
NULL, error);
if (new_registry == NULL)
return NULL;
return g_steal_pointer (&new_registry);
}
| safe | 465 |
static void ikev2_padup_pre_encrypt(struct msg_digest *md,
pb_stream *e_pbs_cipher)
{
struct state *st = md->st;
struct state *pst = st;
if (st->st_clonedfrom != 0)
pst = state_with_serialno(st->st_clonedfrom);
/* pads things up to message size boundary */
{
size_t blocksize = pst->st_oakley.encrypter->enc_blocksize;
char *b = alloca(blocksize);
unsigned int i;
size_t padding = pad_up(pbs_offset(e_pbs_cipher), blocksize);
if (padding == 0)
padding = blocksize;
for (i = 0; i < padding; i++)
b[i] = i;
out_raw(b, padding, e_pbs_cipher, "padding and length");
}
}
| safe | 466 |
static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
struct cfg80211_chan_def *chandef,
gfp_t gfp,
enum nl80211_commands notif,
u8 count)
{
struct sk_buff *msg;
void *hdr;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
hdr = nl80211hdr_put(msg, 0, 0, 0, notif);
if (!hdr) {
nlmsg_free(msg);
return;
}
if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
goto nla_put_failure;
if (nl80211_send_chandef(msg, chandef))
goto nla_put_failure;
if ((notif == NL80211_CMD_CH_SWITCH_STARTED_NOTIFY) &&
(nla_put_u32(msg, NL80211_ATTR_CH_SWITCH_COUNT, count)))
goto nla_put_failure;
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
NL80211_MCGRP_MLME, gfp);
return;
nla_put_failure:
nlmsg_free(msg);
}
| safe | 467 |
respip_nodata_answer(uint16_t qtype, enum respip_action action,
const struct reply_info *rep, size_t rrset_id,
struct reply_info** new_repp, struct regional* region)
{
struct reply_info* new_rep;
if(action == respip_refuse || action == respip_always_refuse) {
new_rep = make_new_reply_info(rep, region, 0, 0);
if(!new_rep)
return 0;
FLAGS_SET_RCODE(new_rep->flags, LDNS_RCODE_REFUSED);
*new_repp = new_rep;
return 1;
} else if(action == respip_static || action == respip_redirect ||
action == respip_always_nxdomain ||
action == respip_inform_redirect) {
/* Since we don't know about other types of the owner name,
* we generally return NOERROR/NODATA unless an NXDOMAIN action
* is explicitly specified. */
int rcode = (action == respip_always_nxdomain)?
LDNS_RCODE_NXDOMAIN:LDNS_RCODE_NOERROR;
/* We should empty the answer section except for any preceding
* CNAMEs (in that case rrset_id > 0). Type-ANY case is
* special as noted in respip_data_answer(). */
if(qtype == LDNS_RR_TYPE_ANY)
rrset_id = 0;
new_rep = make_new_reply_info(rep, region, rrset_id, rrset_id);
if(!new_rep)
return 0;
FLAGS_SET_RCODE(new_rep->flags, rcode);
*new_repp = new_rep;
return 1;
}
return 1;
}
| safe | 468 |
int main(int argc, char *argv[])
{
int i;
char *test_path = NULL;
#ifdef HAVE_SETLOCALE
setlocale(LC_ALL, "");
#endif
if (argc < 2) {
goto usage;
}
for (i = 1; i < argc; i++) {
if (!strcmp(argv[i], "--strip"))
conf.strip = 1;
else if (!strcmp(argv[i], "--env"))
conf.use_env = 1;
else
test_path = argv[i];
}
if (conf.use_env)
return use_env();
else
{
if (!test_path)
goto usage;
return use_conf(test_path);
}
usage:
fprintf(stderr, "argc =%d\n", argc);
fprintf(stderr, "usage: %s [--strip] [--env] test_dir\n", argv[0]);
return 2;
}
| safe | 469 |
static int hashtable_do_del(hashtable_t *hashtable,
const char *key, size_t hash)
{
pair_t *pair;
bucket_t *bucket;
size_t index;
index = hash & hashmask(hashtable->order);
bucket = &hashtable->buckets[index];
pair = hashtable_find_pair(hashtable, bucket, key, hash);
if(!pair)
return -1;
if(&pair->list == bucket->first && &pair->list == bucket->last)
bucket->first = bucket->last = &hashtable->list;
else if(&pair->list == bucket->first)
bucket->first = pair->list.next;
else if(&pair->list == bucket->last)
bucket->last = pair->list.prev;
list_remove(&pair->list);
json_decref(pair->value);
jsonp_free(pair);
hashtable->size--;
return 0;
}
| safe | 470 |
void preprocessNodes(std::vector<Proxy> &nodes, extra_settings &ext)
{
std::for_each(nodes.begin(), nodes.end(), [&ext](Proxy &x)
{
if(ext.remove_emoji)
x.Remark = trim(removeEmoji(x.Remark));
nodeRename(x, ext.rename_array, ext);
if(ext.add_emoji)
x.Remark = addEmoji(x, ext.emoji_array, ext);
});
if(ext.sort_flag)
{
bool failed = true;
if(ext.sort_script.size() && ext.authorized)
{
std::string script = ext.sort_script;
if(startsWith(script, "path:"))
script = fileGet(script.substr(5), false);
script_safe_runner(ext.js_runtime, ext.js_context, [&](qjs::Context &ctx)
{
try
{
ctx.eval(script);
auto compare = (std::function<int(const Proxy&, const Proxy&)>) ctx.eval("compare");
auto comparer = [&](const Proxy &a, const Proxy &b)
{
if(a.Type == ProxyType::Unknow)
return 1;
if(b.Type == ProxyType::Unknow)
return 0;
return compare(a, b);
};
std::stable_sort(nodes.begin(), nodes.end(), comparer);
failed = false;
}
catch(qjs::exception)
{
script_print_stack(ctx);
}
}, global.scriptCleanContext);
}
if(failed) std::stable_sort(nodes.begin(), nodes.end(), [](const Proxy &a, const Proxy &b)
{
return a.Remark < b.Remark;
});
}
}
| safe | 471 |
process_ellipse2(STREAM s, ELLIPSE2_ORDER * os, uint32 present, RD_BOOL delta)
{
BRUSH brush;
if (present & 0x0001)
rdp_in_coord(s, &os->left, delta);
if (present & 0x0002)
rdp_in_coord(s, &os->top, delta);
if (present & 0x0004)
rdp_in_coord(s, &os->right, delta);
if (present & 0x0008)
rdp_in_coord(s, &os->bottom, delta);
if (present & 0x0010)
in_uint8(s, os->opcode);
if (present & 0x0020)
in_uint8(s, os->fillmode);
if (present & 0x0040)
rdp_in_colour(s, &os->bgcolour);
if (present & 0x0080)
rdp_in_colour(s, &os->fgcolour);
rdp_parse_brush(s, &os->brush, present >> 8);
logger(Graphics, Debug,
"process_ellipse2(), l=%d, t=%d, r=%d, b=%d, op=0x%x, fm=%d, bs=%d, bg=0x%x, fg=0x%x",
os->left, os->top, os->right, os->bottom, os->opcode, os->fillmode, os->brush.style,
os->bgcolour, os->fgcolour);
setup_brush(&brush, &os->brush);
ui_ellipse(os->opcode - 1, os->fillmode, os->left, os->top, os->right - os->left,
os->bottom - os->top, &brush, os->bgcolour, os->fgcolour);
}
|
safe
| 472
|
static void mddev_put(struct mddev *mddev)
{
struct bio_set *bs = NULL;
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
/* We did a probe so need to clean up. Call
* queue_work inside the spinlock so that
* flush_workqueue() after mddev_find will
* succeed in waiting for the work to be done.
*/
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
queue_work(md_misc_wq, &mddev->del_work);
} else
kfree(mddev);
}
spin_unlock(&all_mddevs_lock);
if (bs)
bioset_free(bs);
}
|
safe
| 473
|
static inline const char* cli_memmem(const char *haystack, unsigned hlen,
const unsigned char *needle, unsigned nlen)
{
const char *p;
unsigned char c;
if (!needle || !haystack) {
return NULL;
}
c = *needle++;
if (nlen == 1)
return memchr(haystack, c, hlen);
while (hlen >= nlen) {
p = haystack;
haystack = memchr(haystack, c, hlen - nlen + 1);
if (!haystack)
return NULL;
hlen -= haystack+1 - p;
p = haystack + 1;
if (!memcmp(p, needle, nlen-1))
return haystack;
haystack = p;
}
return NULL;
}
|
safe
| 474
|
check_owner_password_V4(std::string& user_password,
std::string const& owner_password,
QPDF::EncryptionData const& data)
{
// Algorithm 3.7 from the PDF 1.7 Reference Manual
unsigned char key[OU_key_bytes_V4];
compute_O_rc4_key(user_password, owner_password, data, key);
unsigned char O_data[key_bytes];
memcpy(O_data, QUtil::unsigned_char_pointer(data.getO()), key_bytes);
std::string k1(reinterpret_cast<char*>(key), OU_key_bytes_V4);
pad_short_parameter(k1, QIntC::to_size(data.getLengthBytes()));
iterate_rc4(O_data, key_bytes, QUtil::unsigned_char_pointer(k1),
data.getLengthBytes(),
(data.getR() >= 3) ? 20 : 1, true);
std::string new_user_password =
std::string(reinterpret_cast<char*>(O_data), key_bytes);
bool result = false;
if (check_user_password(new_user_password, data))
{
result = true;
user_password = new_user_password;
}
return result;
}
|
safe
| 475
|
static int qrtr_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct qrtr_sock *ipc;
if (!sk)
return 0;
lock_sock(sk);
ipc = qrtr_sk(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_orphan(sk);
sock->sk = NULL;
if (!sock_flag(sk, SOCK_ZAPPED))
qrtr_port_remove(ipc);
skb_queue_purge(&sk->sk_receive_queue);
release_sock(sk);
sock_put(sk);
return 0;
}
|
safe
| 476
|
inline void LogSoftmax(const uint8* input_data, const RuntimeShape& input_shape,
int32 input_multiplier, int32 input_left_shift,
int32 reverse_scaling_divisor,
int32 reverse_scaling_right_shift, int diff_min,
uint8* output_data, const RuntimeShape& output_shape) {
SoftmaxParams params;
params.input_multiplier = input_multiplier;
params.input_left_shift = input_left_shift;
params.reverse_scaling_divisor = reverse_scaling_divisor;
params.reverse_scaling_right_shift = reverse_scaling_right_shift;
params.diff_min = diff_min;
LogSoftmax(params, input_shape, input_data, output_shape, output_data);
}
|
safe
| 477
|
static void lsi_scsi_realize(PCIDevice *dev, Error **errp)
{
LSIState *s = LSI53C895A(dev);
DeviceState *d = DEVICE(dev);
uint8_t *pci_conf;
pci_conf = dev->config;
/* PCI latency timer = 255 */
pci_conf[PCI_LATENCY_TIMER] = 0xff;
/* Interrupt pin A */
pci_conf[PCI_INTERRUPT_PIN] = 0x01;
memory_region_init_io(&s->mmio_io, OBJECT(s), &lsi_mmio_ops, s,
"lsi-mmio", 0x400);
memory_region_init_io(&s->ram_io, OBJECT(s), &lsi_ram_ops, s,
"lsi-ram", 0x2000);
memory_region_init_io(&s->io_io, OBJECT(s), &lsi_io_ops, s,
"lsi-io", 256);
address_space_init(&s->pci_io_as, pci_address_space_io(dev), "lsi-pci-io");
qdev_init_gpio_out(d, &s->ext_irq, 1);
pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_io);
pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio_io);
pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->ram_io);
QTAILQ_INIT(&s->queue);
scsi_bus_new(&s->bus, sizeof(s->bus), d, &lsi_scsi_info, NULL);
}
|
safe
| 478
|
gif_main_loop (GifContext *context)
{
gint retval = 0;
do {
switch (context->state) {
case GIF_START:
LOG("start\n");
retval = gif_init (context);
break;
case GIF_GET_COLORMAP:
LOG("get_colormap\n");
retval = gif_get_colormap (context);
if (retval == 0)
context->state = GIF_GET_NEXT_STEP;
break;
case GIF_GET_NEXT_STEP:
LOG("next_step\n");
retval = gif_get_next_step (context);
break;
case GIF_GET_FRAME_INFO:
LOG("frame_info\n");
retval = gif_get_frame_info (context);
break;
case GIF_GET_EXTENTION:
LOG("get_extension\n");
retval = gif_get_extension (context);
if (retval == 0)
context->state = GIF_GET_NEXT_STEP;
break;
case GIF_GET_COLORMAP2:
LOG("get_colormap2\n");
retval = gif_get_colormap2 (context);
if (retval == 0)
gif_set_prepare_lzw (context);
break;
case GIF_PREPARE_LZW:
LOG("prepare_lzw\n");
retval = gif_prepare_lzw (context);
break;
case GIF_LZW_FILL_BUFFER:
LOG("fill_buffer\n");
retval = gif_lzw_fill_buffer (context);
break;
case GIF_LZW_CLEAR_CODE:
LOG("clear_code\n");
retval = gif_lzw_clear_code (context);
break;
case GIF_GET_LZW:
LOG("get_lzw\n");
retval = gif_get_lzw (context);
break;
case GIF_DONE:
LOG("done\n");
default:
retval = 0;
goto done;
};
} while ((retval == 0) || (retval == -3));
done:
return retval;
}
|
safe
| 479
|
static void close_connection_now(h2o_http2_conn_t *conn)
{
h2o_http2_stream_t *stream;
assert(!h2o_timeout_is_linked(&conn->_write.timeout_entry));
kh_foreach_value(conn->streams, stream, { h2o_http2_stream_close(conn, stream); });
assert(conn->num_streams.pull.open == 0);
assert(conn->num_streams.pull.half_closed == 0);
assert(conn->num_streams.pull.send_body == 0);
assert(conn->num_streams.push.half_closed == 0);
assert(conn->num_streams.push.send_body == 0);
assert(conn->num_streams.priority.open == 0);
kh_destroy(h2o_http2_stream_t, conn->streams);
assert(conn->_http1_req_input == NULL);
h2o_hpack_dispose_header_table(&conn->_input_header_table);
h2o_hpack_dispose_header_table(&conn->_output_header_table);
assert(h2o_linklist_is_empty(&conn->_pending_reqs));
h2o_timeout_unlink(&conn->_timeout_entry);
h2o_buffer_dispose(&conn->_write.buf);
if (conn->_write.buf_in_flight != NULL)
h2o_buffer_dispose(&conn->_write.buf_in_flight);
h2o_http2_scheduler_dispose(&conn->scheduler);
assert(h2o_linklist_is_empty(&conn->_write.streams_to_proceed));
assert(!h2o_timeout_is_linked(&conn->_write.timeout_entry));
if (conn->_headers_unparsed != NULL)
h2o_buffer_dispose(&conn->_headers_unparsed);
if (conn->casper != NULL)
h2o_http2_casper_destroy(conn->casper);
h2o_linklist_unlink(&conn->_conns);
if (conn->sock != NULL)
h2o_socket_close(conn->sock);
free(conn);
}
|
safe
| 480
|
static void free_playlist_list(HLSContext *c)
{
int i;
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
free_segment_list(pls);
free_init_section_list(pls);
av_freep(&pls->main_streams);
av_freep(&pls->renditions);
av_freep(&pls->id3_buf);
av_dict_free(&pls->id3_initial);
ff_id3v2_free_extra_meta(&pls->id3_deferred_extra);
av_freep(&pls->init_sec_buf);
av_packet_unref(&pls->pkt);
av_freep(&pls->pb.buffer);
if (pls->input)
ff_format_io_close(c->ctx, &pls->input);
if (pls->ctx) {
pls->ctx->pb = NULL;
avformat_close_input(&pls->ctx);
}
av_free(pls);
}
av_freep(&c->playlists);
av_freep(&c->cookies);
av_freep(&c->user_agent);
av_freep(&c->headers);
av_freep(&c->http_proxy);
c->n_playlists = 0;
}
|
safe
| 481
|
static void handle_tx(struct vhost_net *net)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
struct socket *sock;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
sock = vq->private_data;
if (!sock)
goto out;
if (!vq_meta_prefetch(vq))
goto out;
vhost_disable_notify(&net->dev, vq);
vhost_net_disable_vq(net, vq);
if (vhost_sock_zcopy(sock))
handle_tx_zerocopy(net, sock);
else
handle_tx_copy(net, sock);
out:
mutex_unlock(&vq->mutex);
}
|
safe
| 482
|
int rdbSaveInfoAuxFields(rio *rdb, int rdbflags, rdbSaveInfo *rsi) {
int redis_bits = (sizeof(void*) == 8) ? 64 : 32;
int aof_preamble = (rdbflags & RDBFLAGS_AOF_PREAMBLE) != 0;
/* Add a few fields about the state when the RDB was created. */
if (rdbSaveAuxFieldStrStr(rdb,"redis-ver",REDIS_VERSION) == -1) return -1;
if (rdbSaveAuxFieldStrInt(rdb,"redis-bits",redis_bits) == -1) return -1;
if (rdbSaveAuxFieldStrInt(rdb,"ctime",time(NULL)) == -1) return -1;
if (rdbSaveAuxFieldStrInt(rdb,"used-mem",zmalloc_used_memory()) == -1) return -1;
/* Handle saving options that generate aux fields. */
if (rsi) {
if (rdbSaveAuxFieldStrInt(rdb,"repl-stream-db",rsi->repl_stream_db)
== -1) return -1;
if (rdbSaveAuxFieldStrStr(rdb,"repl-id",server.replid)
== -1) return -1;
if (rdbSaveAuxFieldStrInt(rdb,"repl-offset",server.master_repl_offset)
== -1) return -1;
}
if (rdbSaveAuxFieldStrInt(rdb,"aof-preamble",aof_preamble) == -1) return -1;
return 1;
}
|
safe
| 483
|
add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
struct cifs_open_parms *oparms)
{
struct smb2_create_req *req = iov[0].iov_base;
unsigned int num = *num_iovec;
/* indicate that we don't need to relock the file */
oparms->reconnect = false;
iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
if (!req->CreateContextsOffset)
req->CreateContextsOffset =
cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
iov[1].iov_len);
le32_add_cpu(&req->CreateContextsLength,
sizeof(struct create_durable_handle_reconnect_v2));
inc_rfc1001_len(&req->hdr,
sizeof(struct create_durable_handle_reconnect_v2));
*num_iovec = num + 1;
return 0;
}
|
safe
| 484
|
BOOL rdp_send(rdpRdp* rdp, STREAM* s, UINT16 channel_id)
{
UINT16 length;
UINT32 sec_bytes;
BYTE* sec_hold;
length = stream_get_length(s);
stream_set_pos(s, 0);
rdp_write_header(rdp, s, length, channel_id);
sec_bytes = rdp_get_sec_bytes(rdp);
sec_hold = s->p;
stream_seek(s, sec_bytes);
s->p = sec_hold;
length += rdp_security_stream_out(rdp, s, length);
stream_set_pos(s, length);
if (transport_write(rdp->transport, s) < 0)
return FALSE;
return TRUE;
}
|
safe
| 485
|
int mnt_optstr_fix_user(char **optstr)
{
char *username;
struct libmnt_optloc ol = MNT_INIT_OPTLOC;
int rc = 0;
DBG(CXT, ul_debug("fixing user"));
rc = mnt_optstr_locate_option(*optstr, "user", &ol);
if (rc)
return rc == 1 ? 0 : rc; /* 1: user= not found */
username = mnt_get_username(getuid());
if (!username)
return -ENOMEM;
if (!ol.valsz || (ol.value && strncmp(ol.value, username, ol.valsz) != 0)) {
if (ol.valsz)
/* remove old value */
mnt_optstr_remove_option_at(optstr, ol.value, ol.end);
rc = insert_value(optstr, ol.value ? ol.value : ol.end,
username, NULL);
}
free(username);
return rc;
}
|
safe
| 486
|
static int cxusb_aver_power_ctrl(struct dvb_usb_device *d, int onoff)
{
int ret;
if (!onoff)
return cxusb_ctrl_msg(d, CMD_POWER_OFF, NULL, 0, NULL, 0);
if (d->state == DVB_USB_STATE_INIT &&
usb_set_interface(d->udev, 0, 0) < 0)
err("set interface failed");
do {} while (!(ret = cxusb_ctrl_msg(d, CMD_POWER_ON, NULL, 0, NULL, 0)) &&
!(ret = cxusb_ctrl_msg(d, 0x15, NULL, 0, NULL, 0)) &&
!(ret = cxusb_ctrl_msg(d, 0x17, NULL, 0, NULL, 0)) && 0);
if (!ret) {
/* FIXME: We don't know why, but we need to configure the
* lgdt3303 with the register settings below on resume */
int i;
u8 buf, bufs[] = {
0x0e, 0x2, 0x00, 0x7f,
0x0e, 0x2, 0x02, 0xfe,
0x0e, 0x2, 0x02, 0x01,
0x0e, 0x2, 0x00, 0x03,
0x0e, 0x2, 0x0d, 0x40,
0x0e, 0x2, 0x0e, 0x87,
0x0e, 0x2, 0x0f, 0x8e,
0x0e, 0x2, 0x10, 0x01,
0x0e, 0x2, 0x14, 0xd7,
0x0e, 0x2, 0x47, 0x88,
};
msleep(20);
for (i = 0; i < sizeof(bufs)/sizeof(u8); i += 4/sizeof(u8)) {
ret = cxusb_ctrl_msg(d, CMD_I2C_WRITE,
bufs+i, 4, &buf, 1);
if (ret)
break;
if (buf != 0x8)
return -EREMOTEIO;
}
}
return ret;
}
|
safe
| 487
|
char *get_proxy(char *url, struct pool *pool)
{
pool->rpc_proxy = NULL;
char *split;
int plen, len, i;
for (i = 0; proxynames[i].name; i++) {
plen = strlen(proxynames[i].name);
if (strncmp(url, proxynames[i].name, plen) == 0) {
if (!(split = strchr(url, '|')))
return url;
*split = '\0';
len = split - url;
pool->rpc_proxy = (char *)malloc(1 + len - plen);
if (!(pool->rpc_proxy))
quithere(1, "Failed to malloc rpc_proxy");
strcpy(pool->rpc_proxy, url + plen);
extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port);
pool->rpc_proxytype = proxynames[i].proxytype;
url = split + 1;
break;
}
}
return url;
}
|
safe
| 488
|
static int proc_do_uuid(ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
ctl_table fake_table;
unsigned char buf[64], tmp_uuid[16], *uuid;
uuid = table->data;
if (!uuid) {
uuid = tmp_uuid;
uuid[8] = 0;
}
if (uuid[8] == 0)
generate_random_uuid(uuid);
sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
"%02x%02x%02x%02x%02x%02x",
uuid[0], uuid[1], uuid[2], uuid[3],
uuid[4], uuid[5], uuid[6], uuid[7],
uuid[8], uuid[9], uuid[10], uuid[11],
uuid[12], uuid[13], uuid[14], uuid[15]);
fake_table.data = buf;
fake_table.maxlen = sizeof(buf);
return proc_dostring(&fake_table, write, filp, buffer, lenp, ppos);
}
|
safe
| 489
|
static int open_dev_autofs(Manager *m) {
struct autofs_dev_ioctl param;
assert(m);
if (m->dev_autofs_fd >= 0)
return m->dev_autofs_fd;
label_fix("/dev/autofs", false, false);
m->dev_autofs_fd = open("/dev/autofs", O_CLOEXEC|O_RDONLY);
if (m->dev_autofs_fd < 0)
return log_error_errno(errno, "Failed to open /dev/autofs: %m");
init_autofs_dev_ioctl(&param);
if (ioctl(m->dev_autofs_fd, AUTOFS_DEV_IOCTL_VERSION, &param) < 0) {
m->dev_autofs_fd = safe_close(m->dev_autofs_fd);
return -errno;
}
log_debug("Autofs kernel version %i.%i", param.ver_major, param.ver_minor);
return m->dev_autofs_fd;
}
|
safe
| 490
|
static int iscsi_check_for_auth_key(char *key)
{
/*
* RFC 1994
*/
if (!strcmp(key, "CHAP_A") || !strcmp(key, "CHAP_I") ||
!strcmp(key, "CHAP_C") || !strcmp(key, "CHAP_N") ||
!strcmp(key, "CHAP_R"))
return 1;
/*
* RFC 2945
*/
if (!strcmp(key, "SRP_U") || !strcmp(key, "SRP_N") ||
!strcmp(key, "SRP_g") || !strcmp(key, "SRP_s") ||
!strcmp(key, "SRP_A") || !strcmp(key, "SRP_B") ||
!strcmp(key, "SRP_M") || !strcmp(key, "SRP_HM"))
return 1;
return 0;
}
|
safe
| 491
|
nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd3_readdirargs *args)
{
int len;
u32 max_blocksize = svc_max_payload(rqstp);
p = decode_fh(p, &args->fh);
if (!p)
return 0;
p = xdr_decode_hyper(p, &args->cookie);
args->verf = p; p += 2;
args->dircount = ntohl(*p++);
args->count = ntohl(*p++);
len = args->count = min(args->count, max_blocksize);
while (len > 0) {
struct page *p = *(rqstp->rq_next_page++);
if (!args->buffer)
args->buffer = page_address(p);
len -= PAGE_SIZE;
}
return xdr_argsize_check(rqstp, p);
}
|
safe
| 492
|
static void _slurm_rpc_resv_delete(slurm_msg_t * msg)
{
/* init */
int error_code = SLURM_SUCCESS;
DEF_TIMERS;
reservation_name_msg_t *resv_desc_ptr = (reservation_name_msg_t *)
msg->data;
/* Locks: read job, write node */
slurmctld_lock_t node_write_lock = {
NO_LOCK, READ_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
uid_t uid = g_slurm_auth_get_uid(msg->auth_cred,
slurmctld_config.auth_info);
START_TIMER;
debug2("Processing RPC: REQUEST_DELETE_RESERVATION from uid=%d", uid);
if (!validate_operator(uid)) {
error_code = ESLURM_USER_ID_MISSING;
error("Security violation, DELETE_RESERVATION RPC from uid=%d",
uid);
} else if (!resv_desc_ptr->name) {
error_code = ESLURM_INVALID_PARTITION_NAME;
error("Invalid DELETE_RESERVATION RPC from uid=%d, name is null",
uid);
}
if (error_code == SLURM_SUCCESS) {
/* do RPC call */
lock_slurmctld(node_write_lock);
error_code = delete_resv(resv_desc_ptr);
unlock_slurmctld(node_write_lock);
END_TIMER2("_slurm_rpc_resv_delete");
}
/* return result */
if (error_code) {
info("_slurm_rpc_delete_reservation partition=%s: %s",
resv_desc_ptr->name, slurm_strerror(error_code));
slurm_send_rc_msg(msg, error_code);
} else {
info("_slurm_rpc_delete_reservation complete for %s %s",
resv_desc_ptr->name, TIME_STR);
slurm_send_rc_msg(msg, SLURM_SUCCESS);
queue_job_scheduler();
}
}
|
safe
| 493
|
static enum TIFFReadDirEntryErr TIFFReadDirEntryDataAndRealloc(
TIFF* tif, uint64 offset, tmsize_t size, void** pdest)
{
#if SIZEOF_VOIDP == 8 || SIZEOF_SIZE_T == 8
tmsize_t threshold = INITIAL_THRESHOLD;
#endif
tmsize_t already_read = 0;
assert( !isMapped(tif) );
if (!SeekOK(tif,offset))
return(TIFFReadDirEntryErrIo);
/* On 64 bit processes, read first a maximum of 1 MB, then 10 MB, etc */
/* so as to avoid allocating too much memory in case the file is too */
/* short. We could ask for the file size, but this might be */
/* expensive with some I/O layers (think of reading a gzipped file) */
/* Restrict to 64 bit processes, so as to avoid reallocs() */
/* on 32 bit processes where virtual memory is scarce. */
while( already_read < size )
{
void* new_dest;
tmsize_t bytes_read;
tmsize_t to_read = size - already_read;
#if SIZEOF_VOIDP == 8 || SIZEOF_SIZE_T == 8
if( to_read >= threshold && threshold < MAX_THRESHOLD )
{
to_read = threshold;
threshold *= THRESHOLD_MULTIPLIER;
}
#endif
new_dest = (uint8*) _TIFFrealloc(
*pdest, already_read + to_read);
if( new_dest == NULL )
{
TIFFErrorExt(tif->tif_clientdata, tif->tif_name,
"Failed to allocate memory for %s "
"(%ld elements of %ld bytes each)",
"TIFFReadDirEntryArray",
(long) 1, (long) already_read + to_read);
return TIFFReadDirEntryErrAlloc;
}
*pdest = new_dest;
bytes_read = TIFFReadFile(tif,
(char*)*pdest + already_read, to_read);
already_read += bytes_read;
if (bytes_read != to_read) {
return TIFFReadDirEntryErrIo;
}
}
return TIFFReadDirEntryErrOk;
}
|
safe
| 494
|
Controller::sendHeaderToAppWithHttpProtocolAndWritev(Request *req, ssize_t &bytesWritten,
HttpHeaderConstructionCache &cache)
{
unsigned int maxbuffers = std::min<unsigned int>(
5 + req->headers.size() * 4 + 4, IOV_MAX);
struct iovec *buffers = (struct iovec *) psg_palloc(req->pool,
sizeof(struct iovec) * maxbuffers);
unsigned int nbuffers, dataSize;
if (constructHeaderBuffersForHttpProtocol(req, buffers,
maxbuffers, nbuffers, dataSize, cache))
{
ssize_t ret;
do {
ret = writev(req->session->fd(), buffers, nbuffers);
} while (ret == -1 && errno == EINTR);
bytesWritten = ret;
return ret == (ssize_t) dataSize;
} else {
bytesWritten = 0;
return false;
}
}
|
safe
| 495
|
static int sja1105et_reset_cmd(const void *ctx, const void *data)
{
const struct sja1105_private *priv = ctx;
const struct sja1105_reset_cmd *reset = data;
const struct sja1105_regs *regs = priv->info->regs;
struct device *dev = priv->ds->dev;
u8 packed_buf[SJA1105_SIZE_RESET_CMD];
if (reset->switch_rst ||
reset->cfg_rst ||
reset->car_rst ||
reset->otp_rst ||
reset->por_rst) {
dev_err(dev, "Only warm and cold reset is supported "
"for SJA1105 E/T!\n");
return -EINVAL;
}
if (reset->warm_rst)
dev_dbg(dev, "Warm reset requested\n");
if (reset->cold_rst)
dev_dbg(dev, "Cold reset requested\n");
sja1105et_reset_cmd_pack(packed_buf, reset);
return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu,
packed_buf, SJA1105_SIZE_RESET_CMD);
}
|
safe
| 496
|
PJ_DEF(pjmedia_sdp_media*) pjmedia_sdp_media_clone(
pj_pool_t *pool,
const pjmedia_sdp_media *rhs)
{
unsigned int i;
pjmedia_sdp_media *m = PJ_POOL_ALLOC_T(pool, pjmedia_sdp_media);
PJ_ASSERT_RETURN(m != NULL, NULL);
pj_strdup (pool, &m->desc.media, &rhs->desc.media);
m->desc.port = rhs->desc.port;
m->desc.port_count = rhs->desc.port_count;
pj_strdup (pool, &m->desc.transport, &rhs->desc.transport);
m->desc.fmt_count = rhs->desc.fmt_count;
for (i=0; i<rhs->desc.fmt_count; ++i)
pj_strdup(pool, &m->desc.fmt[i], &rhs->desc.fmt[i]);
if (rhs->conn) {
m->conn = pjmedia_sdp_conn_clone (pool, rhs->conn);
PJ_ASSERT_RETURN(m->conn != NULL, NULL);
} else {
m->conn = NULL;
}
m->bandw_count = rhs->bandw_count;
for (i=0; i < rhs->bandw_count; ++i) {
m->bandw[i] = pjmedia_sdp_bandw_clone (pool, rhs->bandw[i]);
PJ_ASSERT_RETURN(m->bandw[i] != NULL, NULL);
}
m->attr_count = rhs->attr_count;
for (i=0; i < rhs->attr_count; ++i) {
m->attr[i] = pjmedia_sdp_attr_clone (pool, rhs->attr[i]);
PJ_ASSERT_RETURN(m->attr[i] != NULL, NULL);
}
return m;
}
|
safe
| 497
|
static int tg_set_rt_bandwidth(struct task_group *tg,
u64 rt_period, u64 rt_runtime)
{
int i, err = 0;
/*
* Disallowing the root group RT runtime is BAD, it would disallow the
* kernel creating (and or operating) RT threads.
*/
if (tg == &root_task_group && rt_runtime == 0)
return -EINVAL;
/* No period doesn't make any sense. */
if (rt_period == 0)
return -EINVAL;
mutex_lock(&rt_constraints_mutex);
read_lock(&tasklist_lock);
err = __rt_schedulable(tg, rt_period, rt_runtime);
if (err)
goto unlock;
raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = tg->rt_rq[i];
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_runtime;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
return err;
}
|
safe
| 498
|
get_lambda_tv_and_compile(
char_u **arg,
typval_T *rettv,
int types_optional,
evalarg_T *evalarg)
{
int r;
ufunc_T *ufunc;
int save_sc_version = current_sctx.sc_version;
// Get the funcref in "rettv".
current_sctx.sc_version = SCRIPT_VERSION_VIM9;
r = get_lambda_tv(arg, rettv, types_optional, evalarg);
current_sctx.sc_version = save_sc_version;
if (r != OK)
return r;
// "rettv" will now be a partial referencing the function.
ufunc = rettv->vval.v_partial->pt_func;
// Compile it here to get the return type. The return type is optional,
// when it's missing use t_unknown. This is recognized in
// compile_return().
if (ufunc->uf_ret_type == NULL || ufunc->uf_ret_type->tt_type == VAR_VOID)
ufunc->uf_ret_type = &t_unknown;
compile_def_function(ufunc, FALSE, CT_NONE, NULL);
if (ufunc->uf_def_status == UF_COMPILED)
{
// The return type will now be known.
set_function_type(ufunc);
return OK;
}
clear_tv(rettv);
return FAIL;
}
|
safe
| 499
|
static void
yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule, void *yyscanner, RE_LEX_ENVIRONMENT *lex_env)
{
unsigned long int yylno = yyrline[yyrule];
int yynrhs = yyr2[yyrule];
int yyi;
YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
yyrule - 1, yylno);
/* The symbols being reduced. */
for (yyi = 0; yyi < yynrhs; yyi++)
{
YYFPRINTF (stderr, " $%d = ", yyi + 1);
yy_symbol_print (stderr,
yystos[yyssp[yyi + 1 - yynrhs]],
&(yyvsp[(yyi + 1) - (yynrhs)])
, yyscanner, lex_env);
YYFPRINTF (stderr, "\n");
}
}
|
safe
| 500
|
GF_Err moov_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_MovieBox *ptr = (GF_MovieBox *)s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
if (ptr->mvhd) {
e = gf_isom_box_write((GF_Box *) ptr->mvhd, bs);
if (e) return e;
}
if (ptr->iods) {
e = gf_isom_box_write((GF_Box *) ptr->iods, bs);
if (e) return e;
}
if (ptr->meta) {
e = gf_isom_box_write((GF_Box *) ptr->meta, bs);
if (e) return e;
}
#ifndef GPAC_DISABLE_ISOM_FRAGMENTS
if (ptr->mvex) {
e = gf_isom_box_write((GF_Box *) ptr->mvex, bs);
if (e) return e;
}
#endif
e = gf_isom_box_array_write(s, ptr->trackList, bs);
if (e) return e;
if (ptr->udta) {
e = gf_isom_box_write((GF_Box *) ptr->udta, bs);
if (e) return e;
}
return GF_OK;
}
|
safe
| 501
|