| func (string, 12-2.67k chars) | cwe (string, 7 classes) | __index_level_0__ (int64, 0-20k) |
|---|---|---|
delegpt_add_rrset_A(struct delegpt* dp, struct regional* region,
struct ub_packed_rrset_key* ak, uint8_t lame, int* additions)
{
struct packed_rrset_data* d=(struct packed_rrset_data*)ak->entry.data;
size_t i;
struct sockaddr_in sa;
socklen_t len = (socklen_t)sizeof(sa);
log_assert(!dp->dp_type_mlc);
memset(&sa, 0, len);
sa.sin_family = AF_INET;
sa.sin_port = (in_port_t)htons(UNBOUND_DNS_PORT);
for(i=0; i<d->count; i++) {
if(d->rr_len[i] != 2 + INET_SIZE)
continue;
memmove(&sa.sin_addr, d->rr_data[i]+2, INET_SIZE);
if(!delegpt_add_target(dp, region, ak->rk.dname,
ak->rk.dname_len, (struct sockaddr_storage*)&sa,
len, (d->security==sec_status_bogus), lame, additions))
return 0;
}
return 1;
}
| safe | 301 |
static int sd_sync_cache(struct scsi_disk *sdkp)
{
int retries, res;
struct scsi_device *sdp = sdkp->device;
struct scsi_sense_hdr sshdr;
if (!scsi_device_online(sdp))
return -ENODEV;
for (retries = 3; retries > 0; --retries) {
unsigned char cmd[10] = { 0 };
cmd[0] = SYNCHRONIZE_CACHE;
/*
* Leave the rest of the command zero to indicate
* flush everything.
*/
res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL);
if (res == 0)
break;
}
if (res) {
sd_print_result(sdkp, res);
if (driver_byte(res) & DRIVER_SENSE)
sd_print_sense_hdr(sdkp, &sshdr);
}
if (res)
return -EIO;
return 0;
}
| safe | 302 |
static int ohci_queue_iso(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
unsigned long flags;
int ret;
spin_lock_irqsave(&ctx->context.ohci->lock, flags);
if (base->type == FW_ISO_CONTEXT_TRANSMIT)
ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
else if (ctx->context.ohci->use_dualbuffer)
ret = ohci_queue_iso_receive_dualbuffer(base, packet,
buffer, payload);
else
ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
buffer, payload);
spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
return ret;
}
| safe | 303 |
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
int rc = 0;
vcpu_load(vcpu);
vcpu->guest_debug = 0;
kvm_s390_clear_bp_data(vcpu);
if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
rc = -EINVAL;
goto out;
}
if (!sclp.has_gpere) {
rc = -EINVAL;
goto out;
}
if (dbg->control & KVM_GUESTDBG_ENABLE) {
vcpu->guest_debug = dbg->control;
/* enforce guest PER */
kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
rc = kvm_s390_import_bp_data(vcpu, dbg);
} else {
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
vcpu->arch.guestdbg.last_bp = 0;
}
if (rc) {
vcpu->guest_debug = 0;
kvm_s390_clear_bp_data(vcpu);
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
}
out:
vcpu_put(vcpu);
return rc;
}
| safe | 304 |
static void FVSimplify(FontView *fv,int type) {
static struct simplifyinfo smpls[] = {
{ sf_normal, 0, 0, 0, 0, 0, 0 },
{ sf_normal,.75,.05,0,-1, 0, 0 },
{ sf_normal,.75,.05,0,-1, 0, 0 }};
struct simplifyinfo *smpl = &smpls[type+1];
if ( smpl->linelenmax==-1 || (type==0 && !smpl->set_as_default)) {
smpl->err = (fv->b.sf->ascent+fv->b.sf->descent)/1000.;
smpl->linelenmax = (fv->b.sf->ascent+fv->b.sf->descent)/100.;
}
if ( type==1 ) {
if ( !SimplifyDlg(fv->b.sf,smpl))
return;
if ( smpl->set_as_default )
smpls[1] = *smpl;
}
_FVSimplify((FontViewBase *) fv,smpl);
}
| safe | 305 |
static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct io_uring_sqe *sqe,
struct io_submit_state *state)
{
unsigned int sqe_flags;
int id;
req->opcode = READ_ONCE(sqe->opcode);
req->user_data = READ_ONCE(sqe->user_data);
req->io = NULL;
req->file = NULL;
req->ctx = ctx;
req->flags = 0;
/* one is dropped after submission, the other at completion */
refcount_set(&req->refs, 2);
req->task = current;
req->result = 0;
if (unlikely(req->opcode >= IORING_OP_LAST))
return -EINVAL;
if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
return -EFAULT;
sqe_flags = READ_ONCE(sqe->flags);
/* enforce forwards compatibility on users */
if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
return -EINVAL;
if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
!io_op_defs[req->opcode].buffer_select)
return -EOPNOTSUPP;
id = READ_ONCE(sqe->personality);
if (id) {
io_req_init_async(req);
req->work.creds = idr_find(&ctx->personality_idr, id);
if (unlikely(!req->work.creds))
return -EINVAL;
get_cred(req->work.creds);
}
/* same numerical values with corresponding REQ_F_*, safe to copy */
req->flags |= sqe_flags;
if (!io_op_defs[req->opcode].needs_file)
return 0;
return io_req_set_file(state, req, READ_ONCE(sqe->fd));
}
| safe | 306 |
static bool _ber_read_OID_String_impl(TALLOC_CTX *mem_ctx, DATA_BLOB blob,
char **OID, size_t *bytes_eaten)
{
int i;
uint8_t *b;
unsigned int v;
char *tmp_oid = NULL;
if (blob.length < 2) return false;
b = blob.data;
tmp_oid = talloc_asprintf(mem_ctx, "%u", b[0]/40);
if (!tmp_oid) goto nomem;
tmp_oid = talloc_asprintf_append_buffer(tmp_oid, ".%u", b[0]%40);
if (!tmp_oid) goto nomem;
if (bytes_eaten != NULL) {
*bytes_eaten = 0;
}
for(i = 1, v = 0; i < blob.length; i++) {
v = (v<<7) | (b[i]&0x7f);
if ( ! (b[i] & 0x80)) {
tmp_oid = talloc_asprintf_append_buffer(tmp_oid, ".%u", v);
v = 0;
if (bytes_eaten)
*bytes_eaten = i+1;
}
if (!tmp_oid) goto nomem;
}
*OID = tmp_oid;
return true;
nomem:
return false;
}
| safe | 307 |
dissect_kafka_offset_commit_response_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, kafka_api_version_t api_version)
{
proto_item *subti;
proto_tree *subtree;
int topic_start, topic_len;
subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
/* topic */
offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, api_version >= 8,
&topic_start, &topic_len);
/* [partition_response] */
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version >= 8, api_version,
&dissect_kafka_offset_commit_response_partition_response, NULL);
if (api_version >= 8) {
offset = dissect_kafka_tagged_fields(tvb, pinfo, subtree, offset, 0);
}
proto_item_set_end(subti, tvb, offset);
proto_item_append_text(subti, " (Name=%s)",
tvb_get_string_enc(wmem_packet_scope(), tvb,
topic_start, topic_len, ENC_UTF_8));
return offset;
}
| safe | 308 |
static struct smack_known *smack_from_netlbl(const struct sock *sk, u16 family,
struct sk_buff *skb)
{
struct netlbl_lsm_secattr secattr;
struct socket_smack *ssp = NULL;
struct smack_known *skp = NULL;
netlbl_secattr_init(&secattr);
if (sk)
ssp = sk->sk_security;
if (netlbl_skbuff_getattr(skb, family, &secattr) == 0) {
skp = smack_from_secattr(&secattr, ssp);
if (secattr.flags & NETLBL_SECATTR_CACHEABLE)
netlbl_cache_add(skb, family, &skp->smk_netlabel);
}
netlbl_secattr_destroy(&secattr);
return skp;
}
| safe | 309 |
GF_Err fpar_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
u32 i;
FilePartitionBox *ptr = (FilePartitionBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
gf_bs_write_int(bs, ptr->itemID, ptr->version ? 32 : 16);
gf_bs_write_u16(bs, ptr->packet_payload_size);
gf_bs_write_u8(bs, 0);
gf_bs_write_u8(bs, ptr->FEC_encoding_ID);
gf_bs_write_u16(bs, ptr->FEC_instance_ID);
gf_bs_write_u16(bs, ptr->max_source_block_length);
gf_bs_write_u16(bs, ptr->encoding_symbol_length);
gf_bs_write_u16(bs, ptr->max_number_of_encoding_symbols);
if (ptr->scheme_specific_info) {
gf_bs_write_data(bs, ptr->scheme_specific_info, (u32)strlen(ptr->scheme_specific_info) );
}
//null terminated string
gf_bs_write_u8(bs, 0);
gf_bs_write_int(bs, ptr->nb_entries, ptr->version ? 32 : 16);
for (i=0;i < ptr->nb_entries; i++) {
gf_bs_write_u16(bs, ptr->entries[i].block_count);
gf_bs_write_u32(bs, ptr->entries[i].block_size);
}
return GF_OK;
}
| safe | 310 |
static int neightbl_fill_param_info(struct neigh_table *tbl,
struct neigh_parms *parms,
struct sk_buff *skb,
struct netlink_callback *cb)
{
struct ndtmsg *ndtmsg;
struct nlmsghdr *nlh;
nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
NLM_F_MULTI);
ndtmsg = NLMSG_DATA(nlh);
read_lock_bh(&tbl->lock);
ndtmsg->ndtm_family = tbl->family;
ndtmsg->ndtm_pad1 = 0;
ndtmsg->ndtm_pad2 = 0;
RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
if (neightbl_fill_parms(skb, parms) < 0)
goto rtattr_failure;
read_unlock_bh(&tbl->lock);
return NLMSG_END(skb, nlh);
rtattr_failure:
read_unlock_bh(&tbl->lock);
return NLMSG_CANCEL(skb, nlh);
nlmsg_failure:
return -1;
}
| safe | 311 |
static void smap_gather_stats(struct vm_area_struct *vma,
struct mem_size_stats *mss)
{
struct mm_walk smaps_walk = {
.pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
.hugetlb_entry = smaps_hugetlb_range,
#endif
.mm = vma->vm_mm,
};
smaps_walk.private = mss;
#ifdef CONFIG_SHMEM
/* In case of smaps_rollup, reset the value from previous vma */
mss->check_shmem_swap = false;
if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
/*
* For shared or readonly shmem mappings we know that all
* swapped out pages belong to the shmem object, and we can
* obtain the swap value much more efficiently. For private
* writable mappings, we might have COW pages that are
* not affected by the parent swapped out pages of the shmem
* object, so we have to distinguish them during the page walk.
* Unless we know that the shmem object (or the part mapped by
* our VMA) has no swapped out pages at all.
*/
unsigned long shmem_swapped = shmem_swap_usage(vma);
if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
!(vma->vm_flags & VM_WRITE)) {
mss->swap += shmem_swapped;
} else {
mss->check_shmem_swap = true;
smaps_walk.pte_hole = smaps_pte_hole;
}
}
#endif
/* mmap_sem is held in m_start */
walk_page_vma(vma, &smaps_walk);
}
| safe | 312 |
njs_string_to_number(const njs_value_t *value, njs_bool_t parse_float)
{
double num;
njs_bool_t minus;
const u_char *p, *start, *end;
njs_string_prop_t string;
const size_t infinity = njs_length("Infinity");
(void) njs_string_trim(value, &string, NJS_TRIM_START);
p = string.start;
end = p + string.size;
if (p == end) {
return parse_float ? NAN : 0.0;
}
minus = 0;
if (*p == '+') {
p++;
} else if (*p == '-') {
p++;
minus = 1;
}
if (p == end) {
return NAN;
}
if (!parse_float
&& p + 2 < end && p[0] == '0' && (p[1] == 'x' || p[1] == 'X'))
{
p += 2;
num = njs_number_hex_parse(&p, end, 0);
} else {
start = p;
num = njs_number_dec_parse(&p, end, 0);
if (p == start) {
if (p + infinity > end || memcmp(p, "Infinity", infinity) != 0) {
return NAN;
}
num = INFINITY;
p += infinity;
}
}
if (!parse_float) {
while (p < end) {
if (*p != ' ' && *p != '\t') {
return NAN;
}
p++;
}
}
return minus ? -num : num;
}
| safe | 313 |
rb_str_cat(VALUE str, const char *ptr, long len)
{
if (len < 0) {
rb_raise(rb_eArgError, "negative string size (or size too big)");
}
if (STR_ASSOC_P(str)) {
rb_str_modify(str);
if (STR_EMBED_P(str)) str_make_independent(str);
REALLOC_N(RSTRING(str)->as.heap.ptr, char, RSTRING(str)->as.heap.len+len+1);
memcpy(RSTRING(str)->as.heap.ptr + RSTRING(str)->as.heap.len, ptr, len);
RSTRING(str)->as.heap.len += len;
RSTRING(str)->as.heap.ptr[RSTRING(str)->as.heap.len] = '\0'; /* sentinel */
return str;
}
return rb_str_buf_cat(str, ptr, len);
}
| safe | 314 |
static void php_exec_ex(INTERNAL_FUNCTION_PARAMETERS, int mode) /* {{{ */
{
char *cmd;
size_t cmd_len;
zval *ret_code=NULL, *ret_array=NULL;
int ret;
if (mode) {
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|z/", &cmd, &cmd_len, &ret_code) == FAILURE) {
RETURN_FALSE;
}
} else {
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|z/z/", &cmd, &cmd_len, &ret_array, &ret_code) == FAILURE) {
RETURN_FALSE;
}
}
if (!cmd_len) {
php_error_docref(NULL, E_WARNING, "Cannot execute a blank command");
RETURN_FALSE;
}
if (strlen(cmd) != cmd_len) {
php_error_docref(NULL, E_WARNING, "NULL byte detected. Possible attack");
RETURN_FALSE;
}
if (!ret_array) {
ret = php_exec(mode, cmd, NULL, return_value);
} else {
if (Z_TYPE_P(ret_array) != IS_ARRAY) {
zval_dtor(ret_array);
array_init(ret_array);
}
ret = php_exec(2, cmd, ret_array, return_value);
}
if (ret_code) {
zval_dtor(ret_code);
ZVAL_LONG(ret_code, ret);
}
}
| safe | 315 |
CMS_ContentInfo *CMS_encrypt(STACK_OF(X509) *certs, BIO *data,
const EVP_CIPHER *cipher, unsigned int flags)
{
CMS_ContentInfo *cms;
int i;
X509 *recip;
cms = CMS_EnvelopedData_create(cipher);
if (!cms)
goto merr;
for (i = 0; i < sk_X509_num(certs); i++) {
recip = sk_X509_value(certs, i);
if (!CMS_add1_recipient_cert(cms, recip, flags)) {
CMSerr(CMS_F_CMS_ENCRYPT, CMS_R_RECIPIENT_ERROR);
goto err;
}
}
if (!(flags & CMS_DETACHED))
CMS_set_detached(cms, 0);
if ((flags & (CMS_STREAM | CMS_PARTIAL))
|| CMS_final(cms, data, NULL, flags))
return cms;
else
goto err;
merr:
CMSerr(CMS_F_CMS_ENCRYPT, ERR_R_MALLOC_FAILURE);
err:
CMS_ContentInfo_free(cms);
return NULL;
}
| safe | 316 |
detect_mysql_capabilities_for_backup()
{
if (xtrabackup_incremental) {
/* INNODB_CHANGED_PAGES are listed in
INFORMATION_SCHEMA.PLUGINS in MariaDB, but
FLUSH NO_WRITE_TO_BINLOG CHANGED_PAGE_BITMAPS
is not supported for versions below 10.1.6
(see MDEV-7472) */
if (server_flavor == FLAVOR_MARIADB &&
mysql_server_version < 100106) {
have_changed_page_bitmaps = false;
}
}
/* do some sanity checks */
if (opt_galera_info && !have_galera_enabled) {
msg("--galera-info is specified on the command "
"line, but the server does not support Galera "
"replication. Ignoring the option.\n");
opt_galera_info = false;
}
if (opt_slave_info && have_multi_threaded_slave &&
!have_gtid_slave && !opt_safe_slave_backup) {
msg("The --slave-info option requires GTID enabled or "
"--safe-slave-backup option used for a multi-threaded "
"slave.\n");
return(false);
}
return(true);
}
| safe | 317 |
TEST_P(DownstreamProtocolIntegrationTest, HeadersOnlyFilterDecodingIntermediateFilters) {
config_helper_.addFilter(R"EOF(
name: passthrough-filter
)EOF");
config_helper_.addFilter(R"EOF(
name: decode-headers-only
)EOF");
config_helper_.addFilter(R"EOF(
name: passthrough-filter
)EOF");
initialize();
codec_client_ = makeHttpConnection(lookupPort("http"));
auto response =
codec_client_->makeRequestWithBody(Http::TestHeaderMapImpl{{":method", "POST"},
{":path", "/test/long/url"},
{":scheme", "http"},
{":authority", "host"}},
128);
waitForNextUpstreamRequest();
upstream_request_->encodeHeaders(Http::TestHeaderMapImpl{{":status", "503"}}, false);
upstream_request_->encodeData(128, true);
response->waitForEndStream();
EXPECT_TRUE(response->complete());
EXPECT_EQ("503", response->headers().Status()->value().getStringView());
EXPECT_EQ(128, response->body().size());
}
| safe | 318 |
static int parse_import_stub(struct MACH0_(obj_t)* bin, struct symbol_t *symbol, int idx) {
int i, j, nsyms, stridx;
const char *symstr;
if (idx < 0) {
return 0;
}
symbol->offset = 0LL;
symbol->addr = 0LL;
symbol->name[0] = '\0';
if (!bin || !bin->sects) {
return false;
}
for (i = 0; i < bin->nsects; i++) {
if ((bin->sects[i].flags & SECTION_TYPE) == S_SYMBOL_STUBS && bin->sects[i].reserved2 > 0) {
nsyms = (int)(bin->sects[i].size / bin->sects[i].reserved2);
if (nsyms > bin->size) {
bprintf ("mach0: Invalid symbol table size\n");
}
for (j = 0; j < nsyms; j++) {
if (bin->sects) {
if (bin->sects[i].reserved1 + j >= bin->nindirectsyms) {
continue;
}
}
if (bin->indirectsyms) {
if (idx != bin->indirectsyms[bin->sects[i].reserved1 + j]) {
continue;
}
}
if (idx > bin->nsymtab) {
continue;
}
symbol->type = R_BIN_MACH0_SYMBOL_TYPE_LOCAL;
symbol->offset = bin->sects[i].offset + j * bin->sects[i].reserved2;
symbol->addr = bin->sects[i].addr + j * bin->sects[i].reserved2;
symbol->size = 0;
stridx = bin->symtab[idx].n_strx;
if (stridx >= 0 && stridx < bin->symstrlen) {
symstr = (char *)bin->symstr+stridx;
} else {
symstr = "???";
}
// Remove the extra underscore that every import seems to have in Mach-O.
if (*symstr == '_') {
symstr++;
}
snprintf (symbol->name, R_BIN_MACH0_STRING_LENGTH, "imp.%s", symstr);
return true;
}
}
}
return false;
}
| safe | 319 |
void NumberFormatTest::verifyRounding(
DecimalFormat& format,
const double *values,
const char * const *expected,
const DecimalFormat::ERoundingMode *roundingModes,
const char * const *descriptions,
int32_t valueSize,
int32_t roundingModeSize) {
for (int32_t i = 0; i < roundingModeSize; ++i) {
format.setRoundingMode(roundingModes[i]);
for (int32_t j = 0; j < valueSize; j++) {
UnicodeString currentExpected(expected[i * valueSize + j]);
currentExpected = currentExpected.unescape();
UnicodeString actual;
format.format(values[j], actual);
if (currentExpected != actual) {
dataerrln("For %s value %f, expected '%s', got '%s'",
descriptions[i], values[j], CStr(currentExpected)(), CStr(actual)());
}
}
}
}
| safe | 320 |
create_ctas_nodata(List *tlist, IntoClause *into)
{
List *attrList;
ListCell *t,
*lc;
/*
* Build list of ColumnDefs from non-junk elements of the tlist. If a
* column name list was specified in CREATE TABLE AS, override the column
* names in the query. (Too few column names are OK, too many are not.)
*/
attrList = NIL;
lc = list_head(into->colNames);
foreach(t, tlist)
{
TargetEntry *tle = (TargetEntry *) lfirst(t);
if (!tle->resjunk)
{
ColumnDef *col;
char *colname;
if (lc)
{
colname = strVal(lfirst(lc));
lc = lnext(into->colNames, lc);
}
else
colname = tle->resname;
col = makeColumnDef(colname,
exprType((Node *) tle->expr),
exprTypmod((Node *) tle->expr),
exprCollation((Node *) tle->expr));
/*
* It's possible that the column is of a collatable type but the
* collation could not be resolved, so double-check. (We must
* check this here because DefineRelation would adopt the type's
* default collation rather than complaining.)
*/
if (!OidIsValid(col->collOid) &&
type_is_collatable(col->typeName->typeOid))
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_COLLATION),
errmsg("no collation was derived for column \"%s\" with collatable type %s",
col->colname,
format_type_be(col->typeName->typeOid)),
errhint("Use the COLLATE clause to set the collation explicitly.")));
attrList = lappend(attrList, col);
}
}
if (lc != NULL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("too many column names were specified")));
/* Create the relation definition using the ColumnDef list */
return create_ctas_internal(attrList, into);
}
| safe | 321 |
execdict(char *word)
{
char *w, *dictcmd;
Buffer *buf;
if (!UseDictCommand || word == NULL || *word == '\0') {
displayBuffer(Currentbuf, B_NORMAL);
return;
}
w = conv_to_system(word);
if (*w == '\0') {
displayBuffer(Currentbuf, B_NORMAL);
return;
}
dictcmd = Sprintf("%s?%s", DictCommand,
Str_form_quote(Strnew_charp(w))->ptr)->ptr;
buf = loadGeneralFile(dictcmd, NULL, NO_REFERER, 0, NULL);
if (buf == NULL) {
disp_message("Execution failed", TRUE);
return;
}
else if (buf != NO_BUFFER) {
buf->filename = w;
buf->buffername = Sprintf("%s %s", DICTBUFFERNAME, word)->ptr;
if (buf->type == NULL)
buf->type = "text/plain";
pushBuffer(buf);
}
displayBuffer(Currentbuf, B_FORCE_REDRAW);
}
| safe | 322 |
static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
enum hrtimer_restart ret = HRTIMER_RESTART;
struct perf_sample_data data;
struct pt_regs *regs;
struct perf_event *event;
u64 period;
event = container_of(hrtimer, struct perf_event, hw.hrtimer);
if (event->state != PERF_EVENT_STATE_ACTIVE)
return HRTIMER_NORESTART;
event->pmu->read(event);
perf_sample_data_init(&data, 0, event->hw.last_period);
regs = get_irq_regs();
if (regs && !perf_exclude_event(event, regs)) {
if (!(event->attr.exclude_idle && is_idle_task(current)))
if (__perf_event_overflow(event, 1, &data, regs))
ret = HRTIMER_NORESTART;
}
period = max_t(u64, 10000, event->hw.sample_period);
hrtimer_forward_now(hrtimer, ns_to_ktime(period));
return ret;
}
| safe | 323 |
static void
php_mysqlnd_chg_user_free_mem(void * _packet, zend_bool stack_allocation TSRMLS_DC)
{
MYSQLND_PACKET_CHG_USER_RESPONSE * p = (MYSQLND_PACKET_CHG_USER_RESPONSE *) _packet;
if (p->new_auth_protocol) {
mnd_efree(p->new_auth_protocol);
p->new_auth_protocol = NULL;
}
p->new_auth_protocol_len = 0;
if (p->new_auth_protocol_data) {
mnd_efree(p->new_auth_protocol_data);
p->new_auth_protocol_data = NULL;
}
p->new_auth_protocol_data_len = 0;
if (!stack_allocation) {
mnd_pefree(p, p->header.persistent);
}
}
| safe | 324 |
pa_etype_info(krb5_context context,
const krb5_principal client,
const AS_REQ *asreq,
struct pa_info_data *paid,
heim_octet_string *data)
{
krb5_error_code ret;
ETYPE_INFO e;
size_t sz;
size_t i, j;
memset(&e, 0, sizeof(e));
ret = decode_ETYPE_INFO(data->data, data->length, &e, &sz);
if (ret)
goto out;
if (e.len == 0)
goto out;
for (j = 0; j < asreq->req_body.etype.len; j++) {
for (i = 0; i < e.len; i++) {
if (asreq->req_body.etype.val[j] == e.val[i].etype) {
krb5_salt salt;
salt.salttype = KRB5_PW_SALT;
if (e.val[i].salt == NULL)
ret = krb5_get_pw_salt(context, client, &salt);
else {
salt.saltvalue = *e.val[i].salt;
ret = 0;
}
if (e.val[i].salttype)
salt.salttype = *e.val[i].salttype;
if (ret == 0) {
ret = set_paid(paid, context, e.val[i].etype,
salt.salttype,
salt.saltvalue.data,
salt.saltvalue.length,
NULL);
if (e.val[i].salt == NULL)
krb5_free_salt(context, salt);
}
if (ret == 0) {
free_ETYPE_INFO(&e);
return paid;
}
}
}
}
out:
free_ETYPE_INFO(&e);
return NULL;
}
| safe | 325 |
TEST_F(NgramKernelTest, TestOverlappingUnpaddedNGrams) {
MakeOp("|", {3}, "", "", 0, false);
// Batch items are:
// 0: "a"
// 1: "b", "c", "d"
// 2: "e", "f"
AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"});
AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6});
TF_ASSERT_OK(RunOpKernel());
std::vector<tstring> expected_values({"b|c|d"});
std::vector<int64> expected_splits({0, 0, 1, 1});
assert_string_equal(expected_values, *GetOutput(0));
assert_int64_equal(expected_splits, *GetOutput(1));
}
| safe | 326 |
void CServer::ConLogout(IConsole::IResult *pResult, void *pUser)
{
CServer *pServer = (CServer *)pUser;
if(pServer->m_RconClientID >= 0 && pServer->m_RconClientID < MAX_CLIENTS &&
pServer->m_aClients[pServer->m_RconClientID].m_State != CServer::CClient::STATE_EMPTY)
{
CMsgPacker Msg(NETMSG_RCON_AUTH_OFF, true);
pServer->SendMsg(&Msg, MSGFLAG_VITAL, pServer->m_RconClientID);
pServer->m_aClients[pServer->m_RconClientID].m_Authed = AUTHED_NO;
pServer->m_aClients[pServer->m_RconClientID].m_AuthTries = 0;
pServer->m_aClients[pServer->m_RconClientID].m_pRconCmdToSend = 0;
pServer->m_aClients[pServer->m_RconClientID].m_pMapListEntryToSend = 0;
pServer->SendRconLine(pServer->m_RconClientID, "Logout successful.");
char aBuf[32];
str_format(aBuf, sizeof(aBuf), "ClientID=%d logged out", pServer->m_RconClientID);
pServer->Console()->Print(IConsole::OUTPUT_LEVEL_STANDARD, "server", aBuf);
}
}
| safe | 327 |
xfs_attr3_rmt_verify(
struct xfs_mount *mp,
void *ptr,
int fsbsize,
xfs_daddr_t bno)
{
struct xfs_attr3_rmt_hdr *rmt = ptr;
if (!xfs_sb_version_hascrc(&mp->m_sb))
return false;
if (rmt->rm_magic != cpu_to_be32(XFS_ATTR3_RMT_MAGIC))
return false;
if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_uuid))
return false;
if (be64_to_cpu(rmt->rm_blkno) != bno)
return false;
if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
return false;
if (be32_to_cpu(rmt->rm_offset) +
be32_to_cpu(rmt->rm_bytes) > XATTR_SIZE_MAX)
return false;
if (rmt->rm_owner == 0)
return false;
return true;
}
| safe | 328 |
int rad_packet_add_ifid(struct rad_packet_t *pack, const char *vendor_name, const char *name, uint64_t ifid)
{
struct rad_attr_t *ra;
struct rad_dict_attr_t *attr;
struct rad_dict_vendor_t *vendor;
if (pack->len + (vendor_name ? 8 : 2) + 8 >= REQ_LENGTH_MAX)
return -1;
if (vendor_name) {
vendor = rad_dict_find_vendor_name(vendor_name);
if (!vendor)
return -1;
attr = rad_dict_find_vendor_attr(vendor, name);
} else {
vendor = NULL;
attr = rad_dict_find_attr(name);
}
if (!attr)
return -1;
ra = mempool_alloc(attr_pool);
if (!ra)
return -1;
memset(ra, 0, sizeof(*ra));
ra->vendor = vendor;
ra->attr = attr;
ra->len = 8;
ra->val.ifid = ifid;
ra->raw = &ra->val;
list_add_tail(&ra->entry, &pack->attrs);
pack->len += (vendor_name ? 8 : 2) + 8;
return 0;
}
| safe | 329 |
static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
{
struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
int aq_err, ret = 0;
if (list_empty(&vsi->macvlan_list))
return;
list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
if (i40e_is_channel_macvlan(ch)) {
ret = i40e_del_macvlan_filter(hw, ch->seid,
i40e_channel_mac(ch),
&aq_err);
if (!ret) {
/* Reset queue contexts */
i40e_reset_ch_rings(vsi, ch);
clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
netdev_unbind_sb_channel(vsi->netdev,
ch->fwd->netdev);
netdev_set_sb_channel(ch->fwd->netdev, 0);
kfree(ch->fwd);
ch->fwd = NULL;
}
}
}
}
| safe | 330 |
static Bool txtin_process_event(GF_Filter *filter, const GF_FilterEvent *evt)
{
GF_TXTIn *ctx = gf_filter_get_udta(filter);
switch (evt->base.type) {
case GF_FEVT_PLAY:
if (ctx->playstate==1) return GF_TRUE;
ctx->playstate = 1;
if ((ctx->start_range < 0.1) && (evt->play.start_range<0.1)) return GF_TRUE;
ctx->start_range = evt->play.start_range;
ctx->seek_state = 1;
//cancel play event, we work with full file
return GF_TRUE;
case GF_FEVT_STOP:
ctx->playstate = 2;
//cancel play event, we work with full file
return GF_TRUE;
default:
return GF_FALSE;
}
return GF_FALSE;
}
| safe | 331 |
static int cx24116_initfe(struct dvb_frontend *fe)
{
struct cx24116_state *state = fe->demodulator_priv;
struct cx24116_cmd cmd;
int ret;
dprintk("%s()\n", __func__);
/* Power on */
cx24116_writereg(state, 0xe0, 0);
cx24116_writereg(state, 0xe1, 0);
cx24116_writereg(state, 0xea, 0);
/* Firmware CMD 36: Power config */
cmd.args[0x00] = CMD_TUNERSLEEP;
cmd.args[0x01] = 0;
cmd.len = 0x02;
ret = cx24116_cmd_execute(fe, &cmd);
if (ret != 0)
return ret;
ret = cx24116_diseqc_init(fe);
if (ret != 0)
return ret;
/* HVR-4000 needs this */
return cx24116_set_voltage(fe, SEC_VOLTAGE_13);
}
| safe | 332 |
check_file_output (struct url *u, struct http_stat *hs,
struct response *resp, char *hdrval, size_t hdrsize)
{
/* Determine the local filename if needed. Notice that if -O is used
* hstat.local_file is set by http_loop to the argument of -O. */
if (!hs->local_file)
{
char *local_file = NULL;
/* Honor Content-Disposition where possible. */
if (!opt.content_disposition
|| !resp_header_copy (resp, "Content-Disposition",
hdrval, hdrsize)
|| !parse_content_disposition (hdrval, &local_file))
{
/* The Content-Disposition header is missing or broken.
* Choose unique file name according to given URL. */
hs->local_file = url_file_name (u, NULL);
}
else
{
DEBUGP (("Parsed filename from Content-Disposition: %s\n",
local_file));
hs->local_file = url_file_name (u, local_file);
}
xfree (local_file);
}
/* TODO: perform this check only once. */
if (!hs->existence_checked && file_exists_p (hs->local_file))
{
if (opt.noclobber && !opt.output_document)
{
/* If opt.noclobber is turned on and file already exists, do not
retrieve the file. But if the output_document was given, then this
test was already done and the file didn't exist. Hence the !opt.output_document */
return RETRUNNEEDED;
}
else if (!ALLOW_CLOBBER)
{
char *unique = unique_name (hs->local_file, true);
if (unique != hs->local_file)
xfree (hs->local_file);
hs->local_file = unique;
}
}
hs->existence_checked = true;
/* Support timestamping */
if (opt.timestamping && !hs->timestamp_checked)
{
uerr_t timestamp_err = set_file_timestamp (hs);
if (timestamp_err != RETROK)
return timestamp_err;
}
return RETROK;
}
| safe | 333 |
get_attachments_response_cb (ESoapResponse *response,
GSimpleAsyncResult *simple)
{
EwsAsyncData *async_data;
ESoapParameter *param;
ESoapParameter *subparam;
GError *error = NULL;
async_data = g_simple_async_result_get_op_res_gpointer (simple);
param = e_soap_response_get_first_parameter_by_name (
response, "ResponseMessages", &error);
/* Sanity check */
g_return_if_fail (
(param != NULL && error == NULL) ||
(param == NULL && error != NULL));
if (error != NULL) {
g_simple_async_result_take_error (simple, error);
return;
}
subparam = e_soap_parameter_get_first_child (param);
while (subparam != NULL) {
const gchar *name = (const gchar *) subparam->name;
if (!ews_get_response_status (subparam, &error)) {
g_simple_async_result_take_error (simple, error);
return;
}
if (E_EWS_CONNECTION_UTILS_CHECK_ELEMENT (name, "GetAttachmentResponseMessage"))
ews_handle_attachments_param (subparam, async_data);
subparam = e_soap_parameter_get_next_child (subparam);
}
}
| safe | 334 |
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
struct b43_dma *dma = &dev->dma;
struct b43_dmaring *ring = NULL;
switch (cookie & 0xF000) {
case 0x1000:
ring = dma->tx_ring_AC_BK;
break;
case 0x2000:
ring = dma->tx_ring_AC_BE;
break;
case 0x3000:
ring = dma->tx_ring_AC_VI;
break;
case 0x4000:
ring = dma->tx_ring_AC_VO;
break;
case 0x5000:
ring = dma->tx_ring_mcast;
break;
}
*slot = (cookie & 0x0FFF);
if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
b43dbg(dev->wl, "TX-status contains "
"invalid cookie: 0x%04X\n", cookie);
return NULL;
}
return ring;
}
| safe | 335 |
static int nf_tables_dump_rules(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
const struct nft_rule_dump_ctx *ctx = cb->data;
struct nft_table *table;
const struct nft_chain *chain;
unsigned int idx = 0;
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nftables_pernet *nft_net;
rcu_read_lock();
nft_net = nft_pernet(net);
cb->seq = READ_ONCE(nft_net->base_seq);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0)
continue;
if (ctx && ctx->table && ctx->chain) {
struct rhlist_head *list, *tmp;
list = rhltable_lookup(&table->chains_ht, ctx->chain,
nft_chain_ht_params);
if (!list)
goto done;
rhl_for_each_entry_rcu(chain, tmp, list, rhlhead) {
if (!nft_is_active(net, chain))
continue;
__nf_tables_dump_rules(skb, &idx,
cb, table, chain);
break;
}
goto done;
}
list_for_each_entry_rcu(chain, &table->chains, list) {
if (__nf_tables_dump_rules(skb, &idx, cb, table, chain))
goto done;
}
if (ctx && ctx->table)
break;
}
done:
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
| safe | 336 |
static int wsgi_reload_required(apr_pool_t *pool, request_rec *r,
const char *filename, PyObject *module,
const char *resource)
{
PyObject *dict = NULL;
PyObject *object = NULL;
apr_time_t mtime = 0;
dict = PyModule_GetDict(module);
object = PyDict_GetItemString(dict, "__mtime__");
if (object) {
mtime = PyLong_AsLongLong(object);
if (!r || strcmp(r->filename, filename)) {
apr_finfo_t finfo;
if (apr_stat(&finfo, filename, APR_FINFO_NORM,
pool) != APR_SUCCESS) {
return 1;
}
else if (mtime != finfo.mtime) {
return 1;
}
}
else {
if (mtime != r->finfo.mtime)
return 1;
}
}
else
return 1;
if (resource) {
PyObject *dict = NULL;
PyObject *object = NULL;
dict = PyModule_GetDict(module);
object = PyDict_GetItemString(dict, "reload_required");
if (object) {
PyObject *args = NULL;
PyObject *result = NULL;
Py_INCREF(object);
args = Py_BuildValue("(s)", resource);
result = PyEval_CallObject(object, args);
Py_DECREF(args);
Py_DECREF(object);
if (result && PyObject_IsTrue(result)) {
Py_DECREF(result);
return 1;
}
if (PyErr_Occurred())
wsgi_log_python_error(r, NULL, filename);
Py_XDECREF(result);
}
}
return 0;
}
| safe | 337 |
void CSmoothTime::Update(CGraph *pGraph, int64 Target, int TimeLeft, int AdjustDirection)
{
int UpdateTimer = 1;
if(TimeLeft < 0)
{
int IsSpike = 0;
if(TimeLeft < -50)
{
IsSpike = 1;
m_SpikeCounter += 5;
if(m_SpikeCounter > 50)
m_SpikeCounter = 50;
}
if(IsSpike && m_SpikeCounter < 15)
{
// ignore this ping spike
UpdateTimer = 0;
pGraph->Add(TimeLeft, 1,1,0);
}
else
{
pGraph->Add(TimeLeft, 1,0,0);
if(m_aAdjustSpeed[AdjustDirection] < 30.0f)
m_aAdjustSpeed[AdjustDirection] *= 2.0f;
}
}
else
{
if(m_SpikeCounter)
m_SpikeCounter--;
pGraph->Add(TimeLeft, 0,1,0);
m_aAdjustSpeed[AdjustDirection] *= 0.95f;
if(m_aAdjustSpeed[AdjustDirection] < 2.0f)
m_aAdjustSpeed[AdjustDirection] = 2.0f;
}
if(UpdateTimer)
UpdateInt(Target);
}
| safe | 338 |
static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred)
{
struct nfs_fsinfo fsinfo;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
.rpc_argp = clp,
.rpc_resp = &fsinfo,
.rpc_cred = cred,
};
unsigned long now;
int status;
now = jiffies;
status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
if (status == 0) {
spin_lock(&clp->cl_lock);
clp->cl_lease_time = fsinfo.lease_time * HZ;
clp->cl_last_renewal = now;
spin_unlock(&clp->cl_lock);
}
return status;
}
| safe | 339 |
static bool str_append_nstring(string_t *str, const struct imap_arg *arg)
{
const char *cstr;
if (!imap_arg_get_nstring(arg, &cstr))
return FALSE;
switch (arg->type) {
case IMAP_ARG_NIL:
str_append(str, "NIL");
break;
case IMAP_ARG_ATOM:
str_append(str, cstr);
break;
case IMAP_ARG_STRING:
str_append_c(str, '"');
/* NOTE: we're parsing with no-unescape flag,
so don't double-escape it here */
str_append(str, cstr);
str_append_c(str, '"');
break;
case IMAP_ARG_LITERAL: {
str_printfa(str, "{%zu}\r\n", strlen(cstr));
str_append(str, cstr);
break;
}
default:
i_unreached();
return FALSE;
}
return TRUE;
}
| safe | 340 |
static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
struct inode *parent)
{
int len = *lenp;
struct kernel_lb_addr location = UDF_I(inode)->i_location;
struct fid *fid = (struct fid *)fh;
int type = FILEID_UDF_WITHOUT_PARENT;
if (parent && (len < 5)) {
*lenp = 5;
return 255;
} else if (len < 3) {
*lenp = 3;
return 255;
}
*lenp = 3;
fid->udf.block = location.logicalBlockNum;
fid->udf.partref = location.partitionReferenceNum;
fid->udf.parent_partref = 0;
fid->udf.generation = inode->i_generation;
if (parent) {
location = UDF_I(parent)->i_location;
fid->udf.parent_block = location.logicalBlockNum;
fid->udf.parent_partref = location.partitionReferenceNum;
fid->udf.parent_generation = inode->i_generation;
*lenp = 5;
type = FILEID_UDF_WITH_PARENT;
}
return type;
}
| safe | 341 |
int fb_new_modelist(struct fb_info *info)
{
struct fb_event event;
struct fb_var_screeninfo var = info->var;
struct list_head *pos, *n;
struct fb_modelist *modelist;
struct fb_videomode *m, mode;
int err = 1;
list_for_each_safe(pos, n, &info->modelist) {
modelist = list_entry(pos, struct fb_modelist, list);
m = &modelist->mode;
fb_videomode_to_var(&var, m);
var.activate = FB_ACTIVATE_TEST;
err = fb_set_var(info, &var);
fb_var_to_videomode(&mode, &var);
if (err || !fb_mode_is_equal(m, &mode)) {
list_del(pos);
kfree(pos);
}
}
err = 1;
if (!list_empty(&info->modelist)) {
event.info = info;
err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
}
return err;
}
| safe | 342 |
double my_double_round(double value, longlong dec, bool dec_unsigned,
bool truncate)
{
double tmp;
bool dec_negative= (dec < 0) && !dec_unsigned;
ulonglong abs_dec= dec_negative ? -dec : dec;
/*
tmp2 is here to avoid returning the value with 80-bit precision.
This ensures that the test round(0.1,1) = round(0.1,1) is true.
Tagging with volatile is no guarantee, it may still be optimized away...
*/
volatile double tmp2;
tmp=(abs_dec < array_elements(log_10) ?
log_10[abs_dec] : pow(10.0,(double) abs_dec));
// Pre-compute these, to avoid optimizing away e.g. 'floor(v/tmp) * tmp'.
volatile double value_div_tmp= value / tmp;
volatile double value_mul_tmp= value * tmp;
if (!dec_negative && std::isinf(tmp)) // "dec" is too large positive number
return value;
if (dec_negative && std::isinf(tmp))
tmp2= 0.0;
else if (!dec_negative && std::isinf(value_mul_tmp))
tmp2= value;
else if (truncate)
{
if (value >= 0.0)
tmp2= dec < 0 ? floor(value_div_tmp) * tmp : floor(value_mul_tmp) / tmp;
else
tmp2= dec < 0 ? ceil(value_div_tmp) * tmp : ceil(value_mul_tmp) / tmp;
}
else
tmp2=dec < 0 ? rint(value_div_tmp) * tmp : rint(value_mul_tmp) / tmp;
return tmp2;
}
| safe | 343 |
ha_rows st_select_lex::get_limit()
{
ulonglong val= HA_POS_ERROR;
if (select_limit)
{
/*
fix_fields() has not been called for select_limit. That's due to the
historical reasons -- this item could be only of type Item_int, and
Item_int does not require fix_fields(). Thus, fix_fields() was never
called for select_limit.
Some time ago, Item_splocal was also allowed for LIMIT / OFFSET clauses.
However, the fix_fields() behavior was not updated, which led to a crash
in some cases.
There is no single place where to call fix_fields() for LIMIT / OFFSET
items during the fix-fields-phase. Thus, for the sake of readability,
it was decided to do it here, on the evaluation phase (which is a
violation of design, but we chose the lesser of two evils).
We can call fix_fields() here, because select_limit can be of two
types only: Item_int and Item_splocal. Item_int::fix_fields() is trivial,
and Item_splocal::fix_fields() (or rather Item_sp_variable::fix_fields())
has the following properties:
1) it does not affect other items;
2) it does not fail.
Nevertheless DBUG_ASSERT was added to catch future changes in
fix_fields() implementation. Also added runtime check against a result
of fix_fields() in order to handle error condition in non-debug build.
*/
bool fix_fields_successful= true;
if (!select_limit->fixed)
{
fix_fields_successful= !select_limit->fix_fields(master_unit()->thd,
NULL);
DBUG_ASSERT(fix_fields_successful);
}
val= fix_fields_successful ? select_limit->val_uint() : HA_POS_ERROR;
}
return (ha_rows)val;
}
| safe | 344 |
apdu_close_reader (int slot)
{
int sw;
if (DBG_READER)
log_debug ("enter: apdu_close_reader: slot=%d\n", slot);
if (slot < 0 || slot >= MAX_READER || !reader_table[slot].used )
{
if (DBG_READER)
log_debug ("leave: apdu_close_reader => SW_HOST_NO_DRIVER\n");
return SW_HOST_NO_DRIVER;
}
sw = apdu_disconnect (slot);
if (sw)
{
if (DBG_READER)
log_debug ("leave: apdu_close_reader => 0x%x (apdu_disconnect)\n", sw);
return sw;
}
if (reader_table[slot].close_reader)
{
sw = reader_table[slot].close_reader (slot);
if (DBG_READER)
log_debug ("leave: apdu_close_reader => 0x%x (close_reader)\n", sw);
return sw;
}
if (DBG_READER)
log_debug ("leave: apdu_close_reader => SW_HOST_NOT_SUPPORTED\n");
return SW_HOST_NOT_SUPPORTED;
}
| safe | 345 |
f_term_sendkeys(typval_T *argvars, typval_T *rettv)
{
buf_T *buf = term_get_buf(argvars, "term_sendkeys()");
char_u *msg;
term_T *term;
rettv->v_type = VAR_UNKNOWN;
if (buf == NULL)
return;
msg = tv_get_string_chk(&argvars[1]);
if (msg == NULL)
return;
term = buf->b_term;
if (term->tl_vterm == NULL)
return;
while (*msg != NUL)
{
int c;
if (*msg == K_SPECIAL && msg[1] != NUL && msg[2] != NUL)
{
c = TO_SPECIAL(msg[1], msg[2]);
msg += 3;
}
else
{
c = PTR2CHAR(msg);
msg += MB_CPTR2LEN(msg);
}
send_keys_to_term(term, c, FALSE);
}
}
| safe | 346 |
static void ssl_write_max_fragment_length_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
unsigned char *p = buf;
if( ssl->session_negotiate->mfl_code == MBEDTLS_SSL_MAX_FRAG_LEN_NONE )
{
*olen = 0;
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, max_fragment_length extension" ) );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_MAX_FRAGMENT_LENGTH >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_MAX_FRAGMENT_LENGTH ) & 0xFF );
*p++ = 0x00;
*p++ = 1;
*p++ = ssl->session_negotiate->mfl_code;
*olen = 5;
}
| safe | 347 |
static void fts3EvalRestart(
Fts3Cursor *pCsr,
Fts3Expr *pExpr,
int *pRc
){
if( pExpr && *pRc==SQLITE_OK ){
Fts3Phrase *pPhrase = pExpr->pPhrase;
if( pPhrase ){
fts3EvalInvalidatePoslist(pPhrase);
if( pPhrase->bIncr ){
int i;
for(i=0; i<pPhrase->nToken; i++){
Fts3PhraseToken *pToken = &pPhrase->aToken[i];
assert( pToken->pDeferred==0 );
if( pToken->pSegcsr ){
sqlite3Fts3MsrIncrRestart(pToken->pSegcsr);
}
}
*pRc = fts3EvalPhraseStart(pCsr, 0, pPhrase);
}
pPhrase->doclist.pNextDocid = 0;
pPhrase->doclist.iDocid = 0;
pPhrase->pOrPoslist = 0;
}
pExpr->iDocid = 0;
pExpr->bEof = 0;
pExpr->bStart = 0;
fts3EvalRestart(pCsr, pExpr->pLeft, pRc);
fts3EvalRestart(pCsr, pExpr->pRight, pRc);
}
}
| safe | 348 |
void plugin_thdvar_cleanup(THD *thd)
{
uint idx;
plugin_ref *list;
DBUG_ENTER("plugin_thdvar_cleanup");
mysql_mutex_lock(&LOCK_plugin);
unlock_variables(thd, &thd->variables);
cleanup_variables(&thd->variables);
if ((idx= thd->lex->plugins.elements))
{
list= ((plugin_ref*) thd->lex->plugins.buffer) + idx - 1;
DBUG_PRINT("info",("unlocking %d plugins", idx));
while ((uchar*) list >= thd->lex->plugins.buffer)
intern_plugin_unlock(NULL, *list--);
}
reap_plugins();
mysql_mutex_unlock(&LOCK_plugin);
reset_dynamic(&thd->lex->plugins);
DBUG_VOID_RETURN;
}
| safe | 349 |
void usage(void) {
fprintf(stderr,"Usage: ./redis-server [/path/to/redis.conf] [options]\n");
fprintf(stderr," ./redis-server - (read config from stdin)\n");
fprintf(stderr," ./redis-server -v or --version\n");
fprintf(stderr," ./redis-server -h or --help\n");
fprintf(stderr," ./redis-server --test-memory <megabytes>\n\n");
fprintf(stderr,"Examples:\n");
fprintf(stderr," ./redis-server (run the server with default conf)\n");
fprintf(stderr," ./redis-server /etc/redis/6379.conf\n");
fprintf(stderr," ./redis-server --port 7777\n");
fprintf(stderr," ./redis-server --port 7777 --replicaof 127.0.0.1 8888\n");
fprintf(stderr," ./redis-server /etc/myredis.conf --loglevel verbose\n\n");
fprintf(stderr,"Sentinel mode:\n");
fprintf(stderr," ./redis-server /etc/sentinel.conf --sentinel\n");
exit(1);
}
| safe | 350 |
mt_get(mrb_state *mrb, mt_tbl *t, mrb_sym sym, union mt_ptr *pp)
{
int hash, pos, start;
if (t == NULL) return 0;
if (t->alloc == 0) return 0;
if (t->size == 0) return 0;
mrb_sym *keys = (mrb_sym*)&t->ptr[t->alloc];
union mt_ptr *vals = t->ptr;
hash = kh_int_hash_func(mrb, sym);
#ifdef MRB_USE_INLINE_METHOD_CACHE
int cpos = (hash^(uintptr_t)t) % MT_CACHE_SIZE;
pos = mt_cache[cpos];
if (cpos < t->alloc && t->table[cpos].key == sym) {
return &t->table[cpos];
}
#endif
start = pos = hash & (t->alloc-1);
for (;;) {
mrb_sym key = keys[pos];
if (MT_KEY_SYM(key) == sym) {
*pp = vals[pos];
#ifdef MRB_USE_INLINE_METHOD_CACHE
if (pos < 0xff) {
mt_cache[cpos] = pos;
}
#endif
return key;
}
else if (key == MT_EMPTY) {
return 0;
}
pos = (pos+1) & (t->alloc-1);
if (pos == start) { /* not found */
return 0;
}
}
}
| safe | 351 |
int pam_modutil_drop_priv(pam_handle_t *pamh, struct _ykpam_privs *privs, struct passwd *pw) {
privs->saved_euid = geteuid();
privs->saved_egid = getegid();
if ((privs->saved_euid == pw->pw_uid) && (privs->saved_egid == pw->pw_gid)) {
D (privs->debug_file, "Privileges already dropped, pretend it is all right");
return 0;
}
privs->saved_groups_length = getgroups(0, NULL);
if (privs->saved_groups_length < 0) {
D (privs->debug_file, "getgroups: %s", strerror(errno));
return -1;
}
if (privs->saved_groups_length > SAVED_GROUPS_MAX_LEN) {
D (privs->debug_file, "too many groups, limiting.");
privs->saved_groups_length = SAVED_GROUPS_MAX_LEN;
}
if (privs->saved_groups_length > 0) {
if (getgroups(privs->saved_groups_length, privs->saved_groups) < 0) {
D (privs->debug_file, "getgroups: %s", strerror(errno));
goto free_out;
}
}
if (initgroups(pw->pw_name, pw->pw_gid) < 0) {
D (privs->debug_file, "initgroups: %s", strerror(errno));
goto free_out;
}
if (setegid(pw->pw_gid) < 0) {
D (privs->debug_file, "setegid: %s", strerror(errno));
goto free_out;
}
if (seteuid(pw->pw_uid) < 0) {
D (privs->debug_file, "seteuid: %s", strerror(errno));
goto free_out;
}
return 0;
free_out:
return -1;
}
| safe | 352 |
void tls1_get_supported_groups(SSL *s, const uint16_t **pgroups,
size_t *pgroupslen)
{
#if !defined(OPENSSL_NO_EC) || !defined(OPENSSL_NO_DH)
/* For Suite B mode only include P-256, P-384 */
switch (tls1_suiteb(s)) {
# ifndef OPENSSL_NO_EC
case SSL_CERT_FLAG_SUITEB_128_LOS:
*pgroups = suiteb_curves;
*pgroupslen = OSSL_NELEM(suiteb_curves);
break;
case SSL_CERT_FLAG_SUITEB_128_LOS_ONLY:
*pgroups = suiteb_curves;
*pgroupslen = 1;
break;
case SSL_CERT_FLAG_SUITEB_192_LOS:
*pgroups = suiteb_curves + 1;
*pgroupslen = 1;
break;
# endif
default:
if (s->ext.supportedgroups == NULL) {
*pgroups = supported_groups_default;
*pgroupslen = OSSL_NELEM(supported_groups_default);
} else {
*pgroups = s->ext.supportedgroups;
*pgroupslen = s->ext.supportedgroups_len;
}
break;
}
#else
*pgroups = NULL;
*pgroupslen = 0;
#endif /* !defined(OPENSSL_NO_EC) || !defined(OPENSSL_NO_DH) */
}
| safe | 353 |
static int compare_certificates (X509 *cert, X509 *peercert,
unsigned char *peermd, unsigned int peermdlen)
{
unsigned char md[EVP_MAX_MD_SIZE];
unsigned int mdlen;
/* Avoid CPU-intensive digest calculation if the certificates are
* not even remotely equal.
*/
if (X509_subject_name_cmp (cert, peercert) != 0 ||
X509_issuer_name_cmp (cert, peercert) != 0)
return -1;
if (!X509_digest (cert, EVP_sha256(), md, &mdlen) || peermdlen != mdlen)
return -1;
if (memcmp(peermd, md, mdlen) != 0)
return -1;
return 0;
}
| safe | 354 |
zone_send_secureserial(dns_zone_t *zone, uint32_t serial) {
isc_event_t *e;
dns_zone_t *dummy = NULL;
e = isc_event_allocate(zone->secure->mctx, zone,
DNS_EVENT_ZONESECURESERIAL,
receive_secure_serial, zone->secure,
sizeof(struct secure_event));
if (e == NULL)
return (ISC_R_NOMEMORY);
((struct secure_event *)e)->serial = serial;
INSIST(LOCKED_ZONE(zone->secure));
zone_iattach(zone->secure, &dummy);
isc_task_send(zone->secure->task, &e);
DNS_ZONE_CLRFLAG(zone, DNS_ZONEFLG_SENDSECURE);
return (ISC_R_SUCCESS);
}
| safe | 355 |
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
struct page *page = NULL;
int error;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
if (attr->ia_size < inode->i_size) {
/*
* If truncating down to a partial page, then
* if that page is already allocated, hold it
* in memory until the truncation is over, so
* truncate_partial_page cannot miss it were
* it assigned to swap.
*/
if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
(void) shmem_getpage(inode,
attr->ia_size>>PAGE_CACHE_SHIFT,
&page, SGP_READ, NULL);
}
/*
* Reset SHMEM_PAGEIN flag so that shmem_truncate can
* detect if any pages might have been added to cache
* after truncate_inode_pages. But we needn't bother
* if it's being fully truncated to zero-length: the
* nrpages check is efficient enough in that case.
*/
if (attr->ia_size) {
struct shmem_inode_info *info = SHMEM_I(inode);
spin_lock(&info->lock);
info->flags &= ~SHMEM_PAGEIN;
spin_unlock(&info->lock);
}
}
}
error = inode_change_ok(inode, attr);
if (!error)
error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
if (!error && (attr->ia_valid & ATTR_MODE))
error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
if (page)
page_cache_release(page);
return error;
}
| safe | 356 |
finish_bgprint(fz_context *ctx)
{
int status;
if (!bgprint.active)
return;
/* If we are using it, then wait for it to finish. */
status = wait_for_bgprint_to_finish();
if (status == RENDER_OK)
{
/* The background bgprint completed successfully. */
return;
}
if (status == RENDER_FATAL)
{
/* We failed because of not being able to output. No point in retrying. */
fz_throw(ctx, FZ_ERROR_GENERIC, "Failed to render page");
}
bgprint.started = 1;
bgprint.solo = 1;
mu_trigger_semaphore(&bgprint.start);
status = wait_for_bgprint_to_finish();
if (status != 0)
{
/* Hard failure */
fz_throw(ctx, FZ_ERROR_GENERIC, "Failed to render page");
}
}
| safe | 357 |
TEST_F(Http1ServerConnectionImplTest, LargeRequestUrlRejected) {
initialize();
std::string exception_reason;
NiceMock<MockRequestDecoder> decoder;
Http::ResponseEncoder* response_encoder = nullptr;
EXPECT_CALL(callbacks_, newStream(_, _))
.WillOnce(Invoke([&](ResponseEncoder& encoder, bool) -> RequestDecoder& {
response_encoder = &encoder;
return decoder;
}));
// Default limit of 60 KiB
std::string long_url = "/" + std::string(60 * 1024, 'q');
Buffer::OwnedImpl buffer("GET " + long_url + " HTTP/1.1\r\n");
auto status = codec_->dispatch(buffer);
EXPECT_TRUE(isCodecProtocolError(status));
EXPECT_EQ(status.message(), "headers size exceeds limit");
EXPECT_EQ("http1.headers_too_large", response_encoder->getStream().responseDetails());
}
| safe | 358 |
TEST_P(ProtocolIntegrationTest, 304ResponseWithoutContentLength) {
initialize();
codec_client_ = makeHttpConnection(lookupPort("http"));
auto response = codec_client_->makeHeaderOnlyRequest(
Http::TestRequestHeaderMapImpl{{":method", "GET"},
{":path", "/test/long/url"},
{":scheme", "http"},
{":authority", "host"},
{"if-none-match", "\"1234567890\""}});
waitForNextUpstreamRequest();
upstream_request_->encodeHeaders(
Http::TestResponseHeaderMapImpl{{":status", "304"}, {"etag", "\"1234567890\""}}, true);
ASSERT_TRUE(response->waitForEndStream());
EXPECT_TRUE(response->complete());
EXPECT_EQ("304", response->headers().getStatusValue());
EXPECT_TRUE(response->headers().get(Http::LowerCaseString("transfer-encoding")).empty());
EXPECT_TRUE(response->headers().get(Http::LowerCaseString("content-length")).empty());
}
| safe | 359 |
void ZRtp::setMultiStrParams(std::string parameters) {
char tmp[MAX_DIGEST_LENGTH + 1 + 1 + 1]; // max. hash length + cipher + authLength + hash
// First get negotiated hash from parameters, set algorithms and length
int i = parameters.at(0) & 0xff;
hash = &zrtpHashes.getByOrdinal(i);
setNegotiatedHash(hash); // sets hashlength
// use string.copy(buffer, num, start=0) to retrieve chars (bytes) from the string
parameters.copy(tmp, hashLength + 1 + 1 + 1, 0);
i = tmp[1] & 0xff;
authLength = &zrtpAuthLengths.getByOrdinal(i);
i = tmp[2] & 0xff;
cipher = &zrtpSymCiphers.getByOrdinal(i);
memcpy(zrtpSession, tmp+3, hashLength);
// after setting zrtpSession, cipher, and auth-length set multi-stream to true
multiStream = true;
stateEngine->setMultiStream(true);
}
| safe | 360 |
int mnt_optstr_fix_secontext(char **optstr,
char *value,
size_t valsz,
char **next)
{
int rc = 0;
char *p, *val, *begin, *end, *raw = NULL;
size_t sz;
if (!optstr || !*optstr || !value || !valsz)
return -EINVAL;
DBG(CXT, ul_debug("fixing SELinux context"));
begin = value;
end = value + valsz;
/* the selinux contexts are quoted */
if (*value == '"') {
if (valsz <= 2 || *(value + valsz - 1) != '"')
return -EINVAL; /* improperly quoted option string */
value++;
valsz -= 2;
}
p = strndup(value, valsz);
if (!p)
return -ENOMEM;
/* translate the context */
rc = selinux_trans_to_raw_context(p, &raw);
DBG(CXT, ul_debug("SELinux context '%s' translated to '%s'",
p, rc == -1 ? "FAILED" : (char *) raw));
free(p);
if (rc == -1 || !raw)
return -EINVAL;
/* create a quoted string from the raw context */
sz = strlen((char *) raw);
if (!sz)
return -EINVAL;
p = val = malloc(valsz + 3);
if (!val)
return -ENOMEM;
*p++ = '"';
memcpy(p, raw, sz);
p += sz;
*p++ = '"';
*p = '\0';
freecon(raw);
/* set new context */
mnt_optstr_remove_option_at(optstr, begin, end);
rc = insert_value(optstr, begin, val, next);
free(val);
return rc;
}
| safe | 361 |
void btrfs_assign_next_active_device(struct btrfs_device *device,
struct btrfs_device *this_dev)
{
struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_device *next_device;
if (this_dev)
next_device = this_dev;
else
next_device = btrfs_find_next_active_device(fs_info->fs_devices,
device);
ASSERT(next_device);
if (fs_info->sb->s_bdev &&
(fs_info->sb->s_bdev == device->bdev))
fs_info->sb->s_bdev = next_device->bdev;
if (fs_info->fs_devices->latest_bdev == device->bdev)
fs_info->fs_devices->latest_bdev = next_device->bdev;
}
| safe | 362 |
static int gpr32_get_common(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf,
unsigned long *regs)
{
compat_ulong_t *k = kbuf;
compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf)
for (; count > 0 && pos < PT_MSR; --count)
*k++ = regs[pos++];
else
for (; count > 0 && pos < PT_MSR; --count)
if (__put_user((compat_ulong_t) regs[pos++], u++))
return -EFAULT;
if (count > 0 && pos == PT_MSR) {
reg = get_user_msr(target);
if (kbuf)
*k++ = reg;
else if (__put_user(reg, u++))
return -EFAULT;
++pos;
--count;
}
if (kbuf)
for (; count > 0 && pos < PT_REGS_COUNT; --count)
*k++ = regs[pos++];
else
for (; count > 0 && pos < PT_REGS_COUNT; --count)
if (__put_user((compat_ulong_t) regs[pos++], u++))
return -EFAULT;
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
PT_REGS_COUNT * sizeof(reg), -1);
}
| safe | 363 |
static bool chown_cgroup(const char *cgroup_path, struct lxc_conf *conf)
{
struct chown_data data;
char **slist = subsystems;
int i;
if (lxc_list_empty(&conf->id_map))
/* If there's no mapping then we don't need to chown */
return true;
data.cgroup_path = cgroup_path;
data.origuid = geteuid();
/* Unpriv users can't chown it themselves, so chown from
* a child namespace mapping both our own and the target uid
*/
if (userns_exec_1(conf, chown_cgroup_wrapper, &data) < 0) {
ERROR("Error requesting cgroup chown in new namespace");
return false;
}
/*
* Now chmod 775 the directory else the container cannot create cgroups.
* This can't be done in the child namespace because it only group-owns
* the cgroup
*/
if (cgm_supports_multiple_controllers)
slist = subsystems_inone;
for (i = 0; slist[i]; i++) {
if (!lxc_cgmanager_chmod(slist[i], cgroup_path, "", 0775))
return false;
if (!lxc_cgmanager_chmod(slist[i], cgroup_path, "tasks", 0775))
return false;
if (!lxc_cgmanager_chmod(slist[i], cgroup_path, "cgroup.procs", 0775))
return false;
}
return true;
}
| safe | 364 |
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(0);
OP_REQUIRES(ctx, ctx->input(1).NumElements() > 0,
errors::InvalidArgument("Input min must not be empty."));
OP_REQUIRES(ctx, ctx->input(2).NumElements() > 0,
errors::InvalidArgument("Input max must not be empty."));
const float input_min_float = ctx->input(1).flat<float>()(0);
const float input_max_float = ctx->input(2).flat<float>()(0);
Tensor* output_min = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output_min));
Tensor* output_max = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_max));
qint32 used_min_quantized;
qint32 used_max_quantized;
CalculateUsedRange(input, &used_min_quantized, &used_max_quantized);
// We want to make sure that the minimum is no larger than zero, so that the
// convolution operation can run efficiently.
const float used_min_float = std::min(
0.0f,
QuantizedToFloat(used_min_quantized, input_min_float, input_max_float));
const float used_max_float =
QuantizedToFloat(used_max_quantized, input_min_float, input_max_float);
output_min->flat<float>().setConstant(used_min_float);
output_max->flat<float>().setConstant(used_max_float);
}
| safe | 365 |
static inline void snow_set_blocks(SnowContext *s, int level, int x, int y, int l, int cb, int cr, int mx, int my, int ref, int type){
const int w= s->b_width << s->block_max_depth;
const int rem_depth= s->block_max_depth - level;
const int index= (x + y*w) << rem_depth;
const int block_w= 1<<rem_depth;
BlockNode block;
int i,j;
block.color[0]= l;
block.color[1]= cb;
block.color[2]= cr;
block.mx= mx;
block.my= my;
block.ref= ref;
block.type= type;
block.level= level;
for(j=0; j<block_w; j++){
for(i=0; i<block_w; i++){
s->block[index + i + j*w]= block;
}
}
}
| safe | 366 |
void CLASS get_timestamp (int reversed)
{
struct tm t;
char str[20];
int i;
str[19] = 0;
if (reversed)
for (i=19; i--; ) str[i] = fgetc(ifp);
else
fread (str, 19, 1, ifp);
memset (&t, 0, sizeof t);
if (sscanf (str, "%d:%d:%d %d:%d:%d", &t.tm_year, &t.tm_mon,
&t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec) != 6)
return;
t.tm_year -= 1900;
t.tm_mon -= 1;
t.tm_isdst = -1;
if (mktime(&t) > 0)
timestamp = mktime(&t);
}
| safe | 367 |
srs_forward_alloc(srs_t *srs, char **sptr,
const char *sender, const char *alias)
{
char *buf;
int slen;
int alen;
int len;
int ret;
if (srs->noforward)
return SRS_ENOTREWRITTEN;
slen = strlen(sender);
alen = strlen(alias);
/* strlen(SRSxTAG) + strlen("====+@") < 64 */
len = slen + alen + srs->hashlength + SRS_TIME_SIZE + 64;
buf = (char *)srs_f_malloc(len);
ret = srs_forward(srs, buf, len, sender, alias);
if (ret == SRS_SUCCESS)
*sptr = buf;
else
srs_f_free(buf);
return ret;
}
| safe | 368 |
fixup_cattrs (MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
guint32 type, i, idx, token;
MonoObject *ctor;
table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE];
for (i = 0; i < table->rows; ++i) {
values = table->values + ((i + 1) * MONO_CUSTOM_ATTR_SIZE);
type = values [MONO_CUSTOM_ATTR_TYPE];
if ((type & MONO_CUSTOM_ATTR_TYPE_MASK) == MONO_CUSTOM_ATTR_TYPE_METHODDEF) {
idx = type >> MONO_CUSTOM_ATTR_TYPE_BITS;
token = mono_metadata_make_token (MONO_TABLE_METHOD, idx);
ctor = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
g_assert (ctor);
if (!strcmp (ctor->vtable->klass->name, "MonoCMethod")) {
MonoMethod *m = ((MonoReflectionMethod*)ctor)->method;
idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, m));
values [MONO_CUSTOM_ATTR_TYPE] = (idx << MONO_CUSTOM_ATTR_TYPE_BITS) | MONO_CUSTOM_ATTR_TYPE_METHODDEF;
}
}
}
}
|
safe
| 369
|
static void _isdn_setup(struct net_device *dev)
{
isdn_net_local *lp = netdev_priv(dev);
ether_setup(dev);
/* Setup the generic properties */
dev->flags = IFF_NOARP|IFF_POINTOPOINT;
/* isdn prepends a header in the tx path, can't share skbs */
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->header_ops = NULL;
dev->netdev_ops = &isdn_netdev_ops;
/* for clients with MPPP maybe higher values better */
dev->tx_queue_len = 30;
lp->p_encap = ISDN_NET_ENCAP_RAWIP;
lp->magic = ISDN_NET_MAGIC;
lp->last = lp;
lp->next = lp;
lp->isdn_device = -1;
lp->isdn_channel = -1;
lp->pre_device = -1;
lp->pre_channel = -1;
lp->exclusive = -1;
lp->ppp_slot = -1;
lp->pppbind = -1;
skb_queue_head_init(&lp->super_tx_queue);
lp->l2_proto = ISDN_PROTO_L2_X75I;
lp->l3_proto = ISDN_PROTO_L3_TRANS;
lp->triggercps = 6000;
lp->slavedelay = 10 * HZ;
lp->hupflags = ISDN_INHUP; /* Do hangup even on incoming calls */
lp->onhtime = 10; /* Default hangup-time for saving costs */
lp->dialmax = 1;
/* Hangup before Callback, manual dial */
lp->flags = ISDN_NET_CBHUP | ISDN_NET_DM_MANUAL;
lp->cbdelay = 25; /* Wait 5 secs before Callback */
lp->dialtimeout = -1; /* Infinite Dial-Timeout */
lp->dialwait = 5 * HZ; /* Wait 5 sec. after failed dial */
lp->dialstarted = 0; /* Jiffies of last dial-start */
lp->dialwait_timer = 0; /* Jiffies of earliest next dial-start */
}
|
safe
| 370
|
static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
struct msg_msg *msg;
long count = 0;
list_for_each_entry(msg, &msq->q_messages, m_list) {
if (testmsg(msg, *msgtyp, mode) &&
!security_msg_queue_msgrcv(msq, msg, current,
*msgtyp, mode)) {
if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
*msgtyp = msg->m_type - 1;
} else if (mode == SEARCH_NUMBER) {
if (*msgtyp == count)
return msg;
} else
return msg;
count++;
}
}
return ERR_PTR(-EAGAIN);
}
|
safe
| 371
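find_msg above dispatches on kernel-side search modes. A small userspace sketch (Linux SysV IPC; queue id and payload are illustrative) showing how the sign of the msgtyp argument of msgrcv(2) selects those modes; SEARCH_NUMBER corresponds to the MSG_COPY flag, which is not exercised here:

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct mbuf { long mtype; char mtext[64]; };

int main(void)
{
    int qid = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
    struct mbuf m = { 7, "hello" };
    if (qid < 0) return 1;
    msgsnd(qid, &m, strlen(m.mtext) + 1, 0);

    /* msgtyp < 0: lowest mtype <= 7, i.e. SEARCH_LESSEQUAL above.
     * msgtyp == 0 would be SEARCH_ANY, msgtyp > 0 SEARCH_EQUAL. */
    msgrcv(qid, &m, sizeof m.mtext, -7, 0);
    printf("%ld: %s\n", m.mtype, m.mtext);

    msgctl(qid, IPC_RMID, 0);
    return 0;
}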
|
static void fts3ColumnFilter(
int iCol, /* Column to filter on */
int bZero, /* Zero out anything following *ppList */
char **ppList, /* IN/OUT: Pointer to position list */
int *pnList /* IN/OUT: Size of buffer *ppList in bytes */
){
char *pList = *ppList;
int nList = *pnList;
char *pEnd = &pList[nList];
int iCurrent = 0;
char *p = pList;
assert( iCol>=0 );
while( 1 ){
char c = 0;
while( p<pEnd && (c | *p)&0xFE ) c = *p++ & 0x80;
if( iCol==iCurrent ){
nList = (int)(p - pList);
break;
}
nList -= (int)(p - pList);
pList = p;
if( nList<=0 ){
break;
}
p = &pList[1];
p += fts3GetVarint32(p, &iCurrent);
}
if( bZero && (pEnd - &pList[nList])>0){
memset(&pList[nList], 0, pEnd - &pList[nList]);
}
*ppList = pList;
*pnList = nList;
}
|
safe
| 372
|
static int ip_vs_genl_set_config(struct nlattr **attrs)
{
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(&t);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN])
t.tcp_fin_timeout =
nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
return ip_vs_set_timeout(&t);
}
|
safe
| 373
|
PJ_DEF(pj_status_t) pjsua_cancel_stun_resolution( void *token,
pj_bool_t notify_cb)
{
pjsua_stun_resolve *sess;
unsigned cancelled_count = 0;
PJSUA_LOCK();
sess = pjsua_var.stun_res.next;
while (sess != &pjsua_var.stun_res) {
pjsua_stun_resolve *next = sess->next;
if (sess->token == token) {
sess->has_result = PJ_TRUE;
sess->status = PJ_ECANCELLED;
if (notify_cb) {
pj_stun_resolve_result result;
pj_bzero(&result, sizeof(result));
result.token = token;
result.status = PJ_ECANCELLED;
sess->cb(&result);
}
++cancelled_count;
}
sess = next;
}
PJSUA_UNLOCK();
return cancelled_count ? PJ_SUCCESS : PJ_ENOTFOUND;
}
|
safe
| 374
|
bool OSDService::get_inc_map_bl(epoch_t e, bufferlist& bl)
{
Mutex::Locker l(map_cache_lock);
bool found = map_bl_inc_cache.lookup(e, &bl);
if (found) {
if (logger)
logger->inc(l_osd_map_bl_cache_hit);
return true;
}
if (logger)
logger->inc(l_osd_map_bl_cache_miss);
found = store->read(coll_t::meta(),
OSD::get_inc_osdmap_pobject_name(e), 0, 0, bl,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED) >= 0;
if (found) {
_add_map_inc_bl(e, bl);
}
return found;
}
|
safe
| 375
|
static void tcp_openreq_init(struct request_sock *req,
const struct tcp_options_received *rx_opt,
struct sk_buff *skb, const struct sock *sk)
{
struct inet_request_sock *ireq = inet_rsk(req);
req->rsk_rcv_wnd = 0; /* So that tcp_send_synack() knows! */
req->cookie_ts = 0;
tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
skb_mstamp_get(&tcp_rsk(req)->snt_synack);
tcp_rsk(req)->last_oow_ack_time = 0;
req->mss = rx_opt->mss_clamp;
req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
ireq->tstamp_ok = rx_opt->tstamp_ok;
ireq->sack_ok = rx_opt->sack_ok;
ireq->snd_wscale = rx_opt->snd_wscale;
ireq->wscale_ok = rx_opt->wscale_ok;
ireq->acked = 0;
ireq->ecn_ok = 0;
ireq->ir_rmt_port = tcp_hdr(skb)->source;
ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
ireq->ir_mark = inet_request_mark(sk, skb);
}
|
safe
| 376
|
png_check_chunk_length(png_const_structrp png_ptr, const png_uint_32 length)
{
png_alloc_size_t limit = PNG_UINT_31_MAX;
# ifdef PNG_SET_USER_LIMITS_SUPPORTED
if (png_ptr->user_chunk_malloc_max > 0 &&
png_ptr->user_chunk_malloc_max < limit)
limit = png_ptr->user_chunk_malloc_max;
# elif PNG_USER_CHUNK_MALLOC_MAX > 0
if (PNG_USER_CHUNK_MALLOC_MAX < limit)
limit = PNG_USER_CHUNK_MALLOC_MAX;
# endif
if (png_ptr->chunk_name == png_IDAT)
{
png_alloc_size_t idat_limit = PNG_UINT_31_MAX;
size_t row_factor =
(size_t)png_ptr->width
* (size_t)png_ptr->channels
* (png_ptr->bit_depth > 8? 2: 1)
+ 1
+ (png_ptr->interlaced? 6: 0);
if (png_ptr->height > PNG_UINT_32_MAX/row_factor)
idat_limit = PNG_UINT_31_MAX;
else
idat_limit = png_ptr->height * row_factor;
row_factor = row_factor > 32566? 32566 : row_factor;
idat_limit += 6 + 5*(idat_limit/row_factor+1); /* zlib+deflate overhead */
idat_limit=idat_limit < PNG_UINT_31_MAX? idat_limit : PNG_UINT_31_MAX;
limit = limit < idat_limit? idat_limit : limit;
}
if (length > limit)
{
png_debug2(0," length = %lu, limit = %lu",
(unsigned long)length,(unsigned long)limit);
png_chunk_error(png_ptr, "chunk data is too large");
}
}
|
safe
| 377
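A worked example of the IDAT bound computed above, applying the same row_factor arithmetic to a hypothetical 1024x768 RGB8 image; the 32-bit overflow guard from the original is omitted for brevity:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    unsigned width = 1024, height = 768, channels = 3;
    unsigned bit_depth = 8, interlaced = 0;

    size_t row_factor = (size_t)width * channels
                      * (bit_depth > 8 ? 2 : 1)
                      + 1
                      + (interlaced ? 6 : 0);
    size_t idat_limit = (size_t)height * row_factor;
    size_t rf = row_factor > 32566 ? 32566 : row_factor;
    idat_limit += 6 + 5 * (idat_limit / rf + 1);  /* zlib+deflate overhead */

    printf("IDAT limit for %ux%u RGB8: %zu bytes\n", width, height, idat_limit);
    return 0;
}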
|
int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
int (*cb_done)(struct sctp_transport *, void *),
struct net *net, int *pos, void *p) {
struct rhashtable_iter hti;
struct sctp_transport *tsp;
int ret;
again:
ret = 0;
sctp_transport_walk_start(&hti);
tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
ret = cb(tsp, p);
if (ret)
break;
(*pos)++;
sctp_transport_put(tsp);
}
sctp_transport_walk_stop(&hti);
if (ret) {
if (cb_done && !cb_done(tsp, p)) {
(*pos)++;
sctp_transport_put(tsp);
goto again;
}
sctp_transport_put(tsp);
}
return ret;
}
|
safe
| 378
|
static int ext4_mknod(struct inode *dir, struct dentry *dentry,
int mode, dev_t rdev)
{
handle_t *handle;
struct inode *inode;
int err, retries = 0;
if (!new_valid_dev(rdev))
return -EINVAL;
retry:
handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
inode = ext4_new_inode(handle, dir, mode);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
#ifdef CONFIG_EXT4_FS_XATTR
inode->i_op = &ext4_special_inode_operations;
#endif
err = ext4_add_nondir(handle, dentry, inode);
}
ext4_journal_stop(handle);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
}
|
safe
| 379
|
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
struct task_struct *task,
bool cancel_all)
{
struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
struct io_uring_task *tctx = task ? task->io_uring : NULL;
while (1) {
enum io_wq_cancel cret;
bool ret = false;
if (!task) {
ret |= io_uring_try_cancel_iowq(ctx);
} else if (tctx && tctx->io_wq) {
/*
* Cancels requests of all rings, not only @ctx, but
* it's fine as the task is in exit/exec.
*/
cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
&cancel, true);
ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
}
/* SQPOLL thread does its own polling */
if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
(ctx->sq_data && ctx->sq_data->thread == current)) {
while (!list_empty_careful(&ctx->iopoll_list)) {
io_iopoll_try_reap_events(ctx);
ret = true;
}
}
ret |= io_cancel_defer_files(ctx, task, cancel_all);
ret |= io_poll_remove_all(ctx, task, cancel_all);
ret |= io_kill_timeouts(ctx, task, cancel_all);
if (task)
ret |= io_run_task_work();
if (!ret)
break;
cond_resched();
	}
}
|
safe
| 380
|
SYSCALL_DEFINE3(get_robust_list, int, pid,
struct robust_list_head __user * __user *, head_ptr,
size_t __user *, len_ptr)
{
struct robust_list_head __user *head;
unsigned long ret;
struct task_struct *p;
if (!futex_cmpxchg_enabled)
return -ENOSYS;
rcu_read_lock();
ret = -ESRCH;
if (!pid)
p = current;
else {
p = find_task_by_vpid(pid);
if (!p)
goto err_unlock;
}
ret = -EPERM;
if (!ptrace_may_access(p, PTRACE_MODE_READ))
goto err_unlock;
head = p->robust_list;
rcu_read_unlock();
if (put_user(sizeof(*head), len_ptr))
return -EFAULT;
return put_user(head, head_ptr);
err_unlock:
rcu_read_unlock();
return ret;
}
|
safe
| 381
|
static int rleUncompress(int inLength, int maxLength, const signed char in[],
char out[]) {
char *outStart = out;
while (inLength > 0) {
if (*in < 0) {
int count = -(static_cast<int>(*in++));
inLength -= count + 1;
// Fixes #116: Add bounds check to in buffer.
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;
memcpy(out, in, count);
out += count;
in += count;
} else {
int count = *in++;
inLength -= 2;
if (0 > (maxLength -= count + 1)) return 0;
memset(out, *reinterpret_cast<const char *>(in), count + 1);
out += count + 1;
in++;
}
}
return static_cast<int>(out - outStart);
}
|
safe
| 382
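The decoder above consumes a run-length format in which a negative count byte introduces that many literal bytes and a non-negative count byte repeats the following byte count+1 times. A minimal standalone decoder over a hand-built input, mirroring that logic without the bounds checks, purely to illustrate the format:

#include <stdio.h>
#include <string.h>

int main(void)
{
    const signed char in[] = { -3, 'a', 'b', 'c',   /* 3 literal bytes */
                                4, 'x' };           /* 'x' repeated 5 times */
    char out[16] = {0};
    const signed char *p = in;
    char *o = out;
    int left = (int)sizeof in;
    while (left > 0) {
        if (*p < 0) {                 /* literal run */
            int n = -(int)*p++;
            left -= n + 1;
            memcpy(o, p, n); o += n; p += n;
        } else {                      /* repeat run */
            int n = *p++;
            left -= 2;
            memset(o, *p++, n + 1); o += n + 1;
        }
    }
    printf("%s\n", out);   /* prints "abcxxxxx" */
    return 0;
}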
|
static int su3000_rc_query(struct dvb_usb_device *d)
{
u8 key[2];
struct i2c_msg msg = {
.addr = DW2102_RC_QUERY,
.flags = I2C_M_RD,
.buf = key,
.len = 2
};
if (d->props.i2c_algo->master_xfer(&d->i2c_adap, &msg, 1) == 1) {
if (msg.buf[0] != 0xff) {
deb_rc("%s: rc code: %x, %x\n",
__func__, key[0], key[1]);
rc_keydown(d->rc_dev, RC_TYPE_RC5,
RC_SCANCODE_RC5(key[1], key[0]), 0);
}
}
return 0;
}
|
safe
| 383
|
JsVar *jspGetVarNamedField(JsVar *object, JsVar *nameVar, bool returnName) {
JsVar *child = 0;
// if we're an object (or pretending to be one)
if (jsvHasChildren(object))
child = jsvFindChildFromVar(object, nameVar, false);
if (!child) {
if (jsvIsArrayBuffer(object) && jsvIsInt(nameVar)) {
// for array buffers, we actually create a NAME, and hand that back - then when we assign (or use SkipName) we pull out the correct data
child = jsvMakeIntoVariableName(jsvNewFromInteger(jsvGetInteger(nameVar)), object);
if (child) // turn into an 'array buffer name'
child->flags = (child->flags & ~JSV_VARTYPEMASK) | JSV_ARRAYBUFFERNAME;
} else if (jsvIsString(object) && jsvIsInt(nameVar)) {
JsVarInt idx = jsvGetInteger(nameVar);
if (idx>=0 && idx<(JsVarInt)jsvGetStringLength(object)) {
char ch = jsvGetCharInString(object, (size_t)idx);
child = jsvNewStringOfLength(1, &ch);
} else if (returnName)
child = jsvCreateNewChild(object, nameVar, 0); // just return *something* to show this is handled
} else {
// get the name as a string
char name[JSLEX_MAX_TOKEN_LENGTH];
jsvGetString(nameVar, name, JSLEX_MAX_TOKEN_LENGTH);
// try and find it in parents
child = jspGetNamedFieldInParents(object, name, returnName);
// If not found and is the prototype, create it
if (!child && jsvIsFunction(object) && jsvIsStringEqual(nameVar, JSPARSE_PROTOTYPE_VAR)) {
JsVar *value = jsvNewObject(); // prototype is supposed to be an object
child = jsvAddNamedChild(object, value, JSPARSE_PROTOTYPE_VAR);
jsvUnLock(value);
}
}
}
if (returnName) return child;
else return jsvSkipNameAndUnLock(child);
}
|
safe
| 384
|
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
struct task_struct *p;
struct task_struct *group_leader = current->group_leader;
struct pid *pgrp;
int err;
if (!pid)
pid = task_pid_vnr(group_leader);
if (!pgid)
pgid = pid;
if (pgid < 0)
return -EINVAL;
rcu_read_lock();
/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
*/
write_lock_irq(&tasklist_lock);
err = -ESRCH;
p = find_task_by_vpid(pid);
if (!p)
goto out;
err = -EINVAL;
if (!thread_group_leader(p))
goto out;
if (same_thread_group(p->real_parent, group_leader)) {
err = -EPERM;
if (task_session(p) != task_session(group_leader))
goto out;
err = -EACCES;
if (p->did_exec)
goto out;
} else {
err = -ESRCH;
if (p != group_leader)
goto out;
}
err = -EPERM;
if (p->signal->leader)
goto out;
pgrp = task_pid(p);
if (pgid != pid) {
struct task_struct *g;
pgrp = find_vpid(pgid);
g = pid_task(pgrp, PIDTYPE_PGID);
if (!g || task_session(g) != task_session(group_leader))
goto out;
}
err = security_task_setpgid(p, pgid);
if (err)
goto out;
if (task_pgrp(p) != pgrp)
change_pid(p, PIDTYPE_PGID, pgrp);
err = 0;
out:
/* All paths lead to here, thus we are safe. -DaveM */
write_unlock_irq(&tasklist_lock);
rcu_read_unlock();
return err;
}
|
safe
| 385
|
TEST_P(DownstreamProtocolIntegrationTest, ComputedHealthCheck) {
config_helper_.addFilter(R"EOF(
name: health_check
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck
pass_through_mode: false
cluster_min_healthy_percentages:
example_cluster_name: { value: 75 }
)EOF");
initialize();
codec_client_ = makeHttpConnection(lookupPort("http"));
auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{
{":method", "GET"}, {":path", "/healthcheck"}, {":scheme", "http"}, {":authority", "host"}});
ASSERT_TRUE(response->waitForEndStream());
EXPECT_TRUE(response->complete());
EXPECT_EQ("503", response->headers().getStatusValue());
}
|
safe
| 386
|
bool TABLE::validate_default_values_of_unset_fields(THD *thd) const
{
DBUG_ENTER("TABLE::validate_default_values_of_unset_fields");
for (Field **fld= field; *fld; fld++)
{
if (!bitmap_is_set(write_set, (*fld)->field_index) &&
!((*fld)->flags & (NO_DEFAULT_VALUE_FLAG | VERS_SYSTEM_FIELD)))
{
if (!(*fld)->is_null_in_record(s->default_values) &&
(*fld)->validate_value_in_record_with_warn(thd, s->default_values) &&
thd->is_error())
{
/*
We're here if:
- validate_value_in_record_with_warn() failed and
	    strict mode converted WARN to ERROR
- or the connection was killed, or closed unexpectedly
*/
DBUG_RETURN(true);
}
}
}
DBUG_RETURN(false);
}
|
safe
| 387
|
static int qrtr_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
struct qrtr_sock *ipc;
struct sock *sk;
if (sock->type != SOCK_DGRAM)
return -EPROTOTYPE;
sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
if (!sk)
return -ENOMEM;
sock_set_flag(sk, SOCK_ZAPPED);
sock_init_data(sock, sk);
sock->ops = &qrtr_proto_ops;
ipc = qrtr_sk(sk);
ipc->us.sq_family = AF_QIPCRTR;
ipc->us.sq_node = qrtr_local_nid;
ipc->us.sq_port = 0;
return 0;
}
|
safe
| 388
|
midi_synth_open(int dev, int mode)
{
int orig_dev = synth_devs[dev]->midi_dev;
int err;
struct midi_input_info *inc;
if (orig_dev < 0 || orig_dev >= num_midis || midi_devs[orig_dev] == NULL)
return -ENXIO;
midi2synth[orig_dev] = dev;
sysex_state[dev] = 0;
prev_out_status[orig_dev] = 0;
if ((err = midi_devs[orig_dev]->open(orig_dev, mode,
midi_synth_input, midi_synth_output)) < 0)
return err;
inc = &midi_devs[orig_dev]->in_info;
/* save_flags(flags);
cli();
don't know against what irqhandler to protect*/
inc->m_busy = 0;
inc->m_state = MST_INIT;
inc->m_ptr = 0;
inc->m_left = 0;
inc->m_prev_status = 0x00;
/* restore_flags(flags); */
return 1;
}
|
safe
| 389
|
QTN2QT(QTNode *in)
{
TSQuery out;
int len;
int sumlen = 0,
nnode = 0;
QTN2QTState state;
cntsize(in, &sumlen, &nnode);
if (TSQUERY_TOO_BIG(nnode, sumlen))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("tsquery is too large")));
len = COMPUTESIZE(nnode, sumlen);
out = (TSQuery) palloc0(len);
SET_VARSIZE(out, len);
out->size = nnode;
state.curitem = GETQUERY(out);
state.operand = state.curoperand = GETOPERAND(out);
fillQT(&state, in);
return out;
}
|
safe
| 390
|
void Filter::onUpstreamTimeoutAbort(StreamInfo::ResponseFlag response_flags,
absl::string_view details) {
Upstream::ClusterTimeoutBudgetStatsOptRef tb_stats = cluster()->timeoutBudgetStats();
if (tb_stats.has_value()) {
Event::Dispatcher& dispatcher = callbacks_->dispatcher();
std::chrono::milliseconds response_time = std::chrono::duration_cast<std::chrono::milliseconds>(
dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_);
tb_stats->get().upstream_rq_timeout_budget_percent_used_.recordValue(
FilterUtility::percentageOfTimeout(response_time, timeout_.global_timeout_));
}
const absl::string_view body =
timeout_response_code_ == Http::Code::GatewayTimeout ? "upstream request timeout" : "";
onUpstreamAbort(timeout_response_code_, response_flags, body, false, details);
}
|
safe
| 391
|
static int hrtimer_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
ktime_t expires = ktime_sub(timer->expires, base->offset);
int res;
/*
* When the callback is running, we do not reprogram the clock event
* device. The timer callback is either running on a different CPU or
 * the callback is executed in the hrtimer_interrupt context. The
* reprogramming is handled either by the softirq, which called the
* callback or at the end of the hrtimer_interrupt.
*/
if (hrtimer_callback_running(timer))
return 0;
if (expires.tv64 >= expires_next->tv64)
return 0;
/*
* Clockevents returns -ETIME, when the event was in the past.
*/
res = tick_program_event(expires, 0);
if (!IS_ERR_VALUE(res))
*expires_next = expires;
return res;
}
|
safe
| 392
|
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
{
BN_ULONG carry=0;
BN_ULONG bl,bh;
assert(num >= 0);
if (num <= 0) return((BN_ULONG)0);
bl=LBITS(w);
bh=HBITS(w);
#ifndef OPENSSL_SMALL_FOOTPRINT
while (num&~3)
{
mul(rp[0],ap[0],bl,bh,carry);
mul(rp[1],ap[1],bl,bh,carry);
mul(rp[2],ap[2],bl,bh,carry);
mul(rp[3],ap[3],bl,bh,carry);
ap+=4; rp+=4; num-=4;
}
#endif
while (num)
{
mul(rp[0],ap[0],bl,bh,carry);
ap++; rp++; num--;
}
return(carry);
}
|
safe
| 393
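A portable sketch of the per-word step hidden inside the mul() macro above: multiply one word by w, add the running carry, store the low word and keep the high word as the next carry. It assumes the GCC/Clang unsigned __int128 extension in place of the LBITS/HBITS half-word split used by the original:

#include <stdio.h>
#include <stdint.h>

static uint64_t mul_words(uint64_t *rp, const uint64_t *ap, int num, uint64_t w)
{
    uint64_t carry = 0;
    for (int i = 0; i < num; i++) {
        unsigned __int128 t = (unsigned __int128)ap[i] * w + carry;
        rp[i] = (uint64_t)t;          /* low word */
        carry = (uint64_t)(t >> 64);  /* high word becomes next carry */
    }
    return carry;
}

int main(void)
{
    uint64_t a[2] = { UINT64_MAX, 1 }, r[2];
    uint64_t c = mul_words(r, a, 2, 2);   /* (2^65 - 1) * 2 = 2^66 - 2 */
    printf("%llu %llu carry=%llu\n",
           (unsigned long long)r[0], (unsigned long long)r[1],
           (unsigned long long)c);
    return 0;
}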
|
dissect_kafka_sasl_authenticate_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version _U_)
{
offset = dissect_kafka_error(tvb, pinfo, tree, offset);
offset = dissect_kafka_string(tree, hf_kafka_error_message, tvb, pinfo, offset, api_version >= 2, NULL, NULL);
offset = dissect_kafka_bytes(tree, hf_kafka_sasl_auth_bytes, tvb, pinfo, offset, api_version >= 2, NULL, NULL);
if (api_version >= 1) {
offset = dissect_kafka_int64(tree, hf_kafka_session_lifetime_ms, tvb, pinfo, offset, NULL);
}
if (api_version >= 2) {
offset = dissect_kafka_tagged_fields(tvb, pinfo, tree, offset, 0);
}
return offset;
}
|
safe
| 394
|
hfs_cat_get_record_offset_cb(HFS_INFO * hfs, int8_t level_type,
const hfs_btree_key_cat * cur_key,
TSK_OFF_T key_off, void *ptr)
{
HFS_CAT_GET_RECORD_OFFSET_DATA *offset_data = (HFS_CAT_GET_RECORD_OFFSET_DATA *)ptr;
const hfs_btree_key_cat *targ_key = offset_data->targ_key;
if (tsk_verbose)
tsk_fprintf(stderr,
"hfs_cat_get_record_offset_cb: %s node want: %" PRIu32
" vs have: %" PRIu32 "\n",
(level_type == HFS_BT_NODE_TYPE_IDX) ? "Index" : "Leaf",
tsk_getu32(hfs->fs_info.endian, targ_key->parent_cnid),
tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid));
if (level_type == HFS_BT_NODE_TYPE_IDX) {
int diff = hfs_cat_compare_keys(hfs, cur_key, targ_key);
if (diff < 0)
return HFS_BTREE_CB_IDX_LT;
else
return HFS_BTREE_CB_IDX_EQGT;
}
else {
int diff = hfs_cat_compare_keys(hfs, cur_key, targ_key);
// see if this record is for our file or if we passed the interesting entries
if (diff < 0) {
return HFS_BTREE_CB_LEAF_GO;
}
else if (diff == 0) {
offset_data->off =
key_off + 2 + tsk_getu16(hfs->fs_info.endian,
cur_key->key_len);
}
return HFS_BTREE_CB_LEAF_STOP;
}
}
|
safe
| 395
|
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
/*
* Only private memory slots need to be mapped here since
* KVM_SET_MEMORY_REGION ioctl is no longer supported.
*/
if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
unsigned long userspace_addr;
/*
* MAP_SHARED to prevent internal slot pages from being moved
* by fork()/COW.
*/
userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, 0);
if (IS_ERR((void *)userspace_addr))
return PTR_ERR((void *)userspace_addr);
memslot->userspace_addr = userspace_addr;
}
return 0;
}
|
safe
| 396
|
void prepareChroot(SpawnPreparationInfo &info, const Options &options) {
TRACE_POINT();
info.appRoot = absolutizePath(options.appRoot);
if (options.preexecChroot.empty()) {
info.chrootDir = "/";
} else {
info.chrootDir = absolutizePath(options.preexecChroot);
}
	if (info.appRoot != info.chrootDir && !startsWith(info.appRoot, info.chrootDir + "/")) {
SpawnException e("Invalid configuration: '" + info.chrootDir +
"' has been configured as the chroot jail, but the application " +
"root directory '" + info.appRoot + "' is not a subdirectory of the " +
"chroot directory, which it must be.");
throwSpawnException(e, options);
}
if (info.appRoot == info.chrootDir) {
info.appRootInsideChroot = "/";
} else if (info.chrootDir == "/") {
info.appRootInsideChroot = info.appRoot;
} else {
info.appRootInsideChroot = info.appRoot.substr(info.chrootDir.size());
}
}
|
safe
| 398
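The containment test above appends "/" to the chroot prefix before comparing. A small sketch of why that separator matters: without it, a sibling directory that merely shares the prefix would pass the check.

#include <stdio.h>
#include <string.h>

/* Prefix-based subdirectory test, requiring a '/' right after the prefix. */
static int is_subdir(const char *path, const char *dir)
{
    size_t n = strlen(dir);
    return strncmp(path, dir, n) == 0 && path[n] == '/';
}

int main(void)
{
    printf("%d\n", is_subdir("/srv/app/current", "/srv/app"));  /* 1 */
    printf("%d\n", is_subdir("/srv/app2", "/srv/app"));         /* 0 */
    return 0;
}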
|
static u32 av1_decode_subexp(GF_BitStream *bs, s32 numSyms)
{
s32 i = 0;
s32 mk = 0;
s32 k = 3;
while (1) {
s32 b2 = i ? k + i - 1 : k;
s32 a = 1 << b2;
if (numSyms <= mk + 3 * a) {
s32 subexp_final_bits = av1_read_ns(bs, numSyms - mk, NULL);
return subexp_final_bits + mk;
}
else {
s32 subexp_more_bits = gf_bs_read_int(bs, 1);
if (subexp_more_bits) {
i++;
mk += a;
}
else {
s32 subexp_bits = gf_bs_read_int(bs, b2);
return subexp_bits + mk;
}
}
}
}
|
safe
| 399
|
bool ValidateRC5()
{
std::cout << "\nRC5 validation suite running...\n\n";
bool pass1 = true, pass2 = true;
RC5Encryption enc; // 0 to 2040-bits (255-bytes)
pass1 = RC5Encryption::DEFAULT_KEYLENGTH == 16 && pass1;
pass1 = enc.StaticGetValidKeyLength(0) == 0 && pass1;
pass1 = enc.StaticGetValidKeyLength(254) == 254 && pass1;
pass1 = enc.StaticGetValidKeyLength(255) == 255 && pass1;
pass1 = enc.StaticGetValidKeyLength(256) == 255 && pass1;
pass1 = enc.StaticGetValidKeyLength(0) == enc.MinKeyLength() && pass1;
pass1 = enc.StaticGetValidKeyLength(SIZE_MAX) == enc.MaxKeyLength() && pass1;
RC5Decryption dec;
pass2 = RC5Decryption::DEFAULT_KEYLENGTH == 16 && pass2;
pass2 = dec.StaticGetValidKeyLength(0) == 0 && pass2;
pass2 = dec.StaticGetValidKeyLength(254) == 254 && pass2;
pass2 = dec.StaticGetValidKeyLength(255) == 255 && pass2;
pass2 = dec.StaticGetValidKeyLength(256) == 255 && pass2;
pass2 = dec.StaticGetValidKeyLength(0) == dec.MinKeyLength() && pass2;
pass2 = dec.StaticGetValidKeyLength(SIZE_MAX) == dec.MaxKeyLength() && pass2;
std::cout << (pass1 && pass2 ? "passed:" : "FAILED:") << " Algorithm key lengths\n";
FileSource valdata(CRYPTOPP_DATA_DIR "TestData/rc5val.dat", true, new HexDecoder);
return BlockTransformationTest(VariableRoundsCipherFactory<RC5Encryption, RC5Decryption>(16, 12), valdata) && pass1 && pass2;
}
|
safe
| 400
|
srs_parse_shortcut(srs_t *srs, char *buf, unsigned buflen, char *senduser)
{
char *srshash;
char *srsstamp;
char *srshost;
char *srsuser;
int ret;
if (strncasecmp(senduser, SRS0TAG, 4) == 0) {
srshash = senduser + 5;
if (!STRINGP(srshash))
return SRS_ENOSRS0HASH;
srsstamp = strchr(srshash, SRSSEP);
if (!STRINGP(srsstamp))
return SRS_ENOSRS0STAMP;
*srsstamp++ = '\0';
srshost = strchr(srsstamp, SRSSEP);
if (!STRINGP(srshost))
return SRS_ENOSRS0HOST;
*srshost++ = '\0';
srsuser = strchr(srshost, SRSSEP);
if (!STRINGP(srsuser))
return SRS_ENOSRS0USER;
*srsuser++ = '\0';
ret = srs_timestamp_check(srs, srsstamp);
if (ret != SRS_SUCCESS)
return ret;
ret = srs_hash_check(srs, srshash, 3, srsstamp,
srshost, srsuser);
if (ret != SRS_SUCCESS)
return ret;
snprintf(buf, buflen, "%s@%s", srsuser, srshost);
return SRS_SUCCESS;
}
return SRS_ENOTSRSADDRESS;
}
|
safe
| 401
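A standalone sketch of the shortcut split performed above, assuming SRSSEP is '=' as in stock libsrs2 and skipping the hash and timestamp validation that the real function performs. Given the local part "SRS0=hash=stamp=host=user", it recovers "user@host":

#include <stdio.h>
#include <string.h>

int main(void)
{
    char local[] = "SRS0=h4sh=T5=example.org=alice";  /* example input */
    char *hash  = local + 5;          /* skip "SRS0" and the separator */
    char *stamp = strchr(hash, '=');
    if (!stamp) return 1;
    *stamp++ = '\0';
    char *host = strchr(stamp, '=');
    if (!host) return 1;
    *host++ = '\0';
    char *user = strchr(host, '=');
    if (!user) return 1;
    *user++ = '\0';
    printf("%s@%s\n", user, host);    /* prints "alice@example.org" */
    return 0;
}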
|