if (*size < sizeof(*ce))
return -EINVAL;
- ce = (struct ebt_entry __user *)*dstptr;
+ ce = *dstptr;
if (copy_to_user(ce, e, sizeof(*ce)))
return -EFAULT;
*/
for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
unsigned int pos = newinfo->hook_entry[hook];
- struct arpt_entry *e
- = (struct arpt_entry *)(entry0 + pos);
+ struct arpt_entry *e = entry0 + pos;
if (!(valid_hooks & (1 << hook)))
continue;
if (pos == oldpos)
goto next;
- e = (struct arpt_entry *)
- (entry0 + pos);
+ e = entry0 + pos;
} while (oldpos == pos + e->next_offset);
/* Move along one */
size = e->next_offset;
- e = (struct arpt_entry *)
- (entry0 + pos + size);
+ e = entry0 + pos + size;
if (pos + size >= newinfo->size)
return 0;
e->counters.pcnt = pos;
if (!xt_find_jump_offset(offsets, newpos,
newinfo->number))
return 0;
- e = (struct arpt_entry *)
- (entry0 + newpos);
+ e = entry0 + newpos;
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
if (newpos >= newinfo->size)
return 0;
}
- e = (struct arpt_entry *)
- (entry0 + newpos);
+ e = entry0 + newpos;
e->counters.pcnt = pos;
pos = newpos;
}
for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
const struct xt_entry_target *t;
- e = (struct arpt_entry *)(loc_cpu_entry + off);
+ e = loc_cpu_entry + off;
if (copy_to_user(userptr + off, e, sizeof(*e))) {
ret = -EFAULT;
goto free_counters;
int h;
origsize = *size;
- de = (struct arpt_entry *)*dstptr;
+ de = *dstptr;
memcpy(de, e, sizeof(struct arpt_entry));
memcpy(&de->counters, &e->counters, sizeof(e->counters));
int ret;
origsize = *size;
- ce = (struct compat_arpt_entry __user *)*dstptr;
+ ce = *dstptr;
if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
copy_to_user(&ce->counters, &counters[i],
sizeof(counters[i])) != 0)
to 0 as we leave), and comefrom to save source hook bitmask */
for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
unsigned int pos = newinfo->hook_entry[hook];
- struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);
+ struct ipt_entry *e = entry0 + pos;
if (!(valid_hooks & (1 << hook)))
continue;
if (pos == oldpos)
goto next;
- e = (struct ipt_entry *)
- (entry0 + pos);
+ e = entry0 + pos;
} while (oldpos == pos + e->next_offset);
/* Move along one */
size = e->next_offset;
- e = (struct ipt_entry *)
- (entry0 + pos + size);
+ e = entry0 + pos + size;
if (pos + size >= newinfo->size)
return 0;
e->counters.pcnt = pos;
if (!xt_find_jump_offset(offsets, newpos,
newinfo->number))
return 0;
- e = (struct ipt_entry *)
- (entry0 + newpos);
+ e = entry0 + newpos;
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
if (newpos >= newinfo->size)
return 0;
}
- e = (struct ipt_entry *)
- (entry0 + newpos);
+ e = entry0 + newpos;
e->counters.pcnt = pos;
pos = newpos;
}
const struct xt_entry_match *m;
const struct xt_entry_target *t;
- e = (struct ipt_entry *)(loc_cpu_entry + off);
+ e = loc_cpu_entry + off;
if (copy_to_user(userptr + off, e, sizeof(*e))) {
ret = -EFAULT;
goto free_counters;
int ret = 0;
origsize = *size;
- ce = (struct compat_ipt_entry __user *)*dstptr;
+ ce = *dstptr;
if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
copy_to_user(&ce->counters, &counters[i],
sizeof(counters[i])) != 0)
struct xt_entry_match *ematch;
origsize = *size;
- de = (struct ipt_entry *)*dstptr;
+ de = *dstptr;
memcpy(de, e, sizeof(struct ipt_entry));
memcpy(&de->counters, &e->counters, sizeof(e->counters));
to 0 as we leave), and comefrom to save source hook bitmask */
for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
unsigned int pos = newinfo->hook_entry[hook];
- struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
+ struct ip6t_entry *e = entry0 + pos;
if (!(valid_hooks & (1 << hook)))
continue;
if (pos == oldpos)
goto next;
- e = (struct ip6t_entry *)
- (entry0 + pos);
+ e = entry0 + pos;
} while (oldpos == pos + e->next_offset);
/* Move along one */
size = e->next_offset;
- e = (struct ip6t_entry *)
- (entry0 + pos + size);
+ e = entry0 + pos + size;
if (pos + size >= newinfo->size)
return 0;
e->counters.pcnt = pos;
if (!xt_find_jump_offset(offsets, newpos,
newinfo->number))
return 0;
- e = (struct ip6t_entry *)
- (entry0 + newpos);
+ e = entry0 + newpos;
} else {
/* ... this is a fallthru */
newpos = pos + e->next_offset;
if (newpos >= newinfo->size)
return 0;
}
- e = (struct ip6t_entry *)
- (entry0 + newpos);
+ e = entry0 + newpos;
e->counters.pcnt = pos;
pos = newpos;
}
const struct xt_entry_match *m;
const struct xt_entry_target *t;
- e = (struct ip6t_entry *)(loc_cpu_entry + off);
+ e = loc_cpu_entry + off;
if (copy_to_user(userptr + off, e, sizeof(*e))) {
ret = -EFAULT;
goto free_counters;
int ret = 0;
origsize = *size;
- ce = (struct compat_ip6t_entry __user *)*dstptr;
+ ce = *dstptr;
if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
copy_to_user(&ce->counters, &counters[i],
sizeof(counters[i])) != 0)
struct xt_entry_match *ematch;
origsize = *size;
- de = (struct ip6t_entry *)*dstptr;
+ de = *dstptr;
memcpy(de, e, sizeof(struct ip6t_entry));
memcpy(&de->counters, &e->counters, sizeof(e->counters));
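All of these hunks lean on the same C rule: a void pointer converts implicitly to any other object pointer type on assignment, so each removed cast is redundant. The same holds for the void __user * destinations (ce = *dstptr), since the __user address-space annotation is unchanged by the implicit conversion and sparse stays quiet. A minimal userspace sketch of the rule, with a made-up struct entry standing in for the arpt/ipt/ip6t entry types:

#include <stdio.h>
#include <stdlib.h>

struct entry {				/* stand-in, not a kernel type */
	unsigned int next_offset;
};

int main(void)
{
	void *blob = calloc(1, sizeof(struct entry));
	struct entry *e;

	if (!blob)
		return 1;

	/* No cast needed: void * assigns directly to struct entry *. */
	e = blob;
	e->next_offset = 16;
	printf("next_offset=%u\n", e->next_offset);

	free(blob);
	return 0;
}

The entry0 + pos arithmetic feeding these assignments relies on the GNU extension that permits arithmetic on void * (sizeof(void) is treated as 1), which the kernel is always built with, so no intermediate cast is needed there either.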
if (!test_bit(id, map->members) ||
(SET_WITH_TIMEOUT(set) &&
#ifdef IP_SET_BITMAP_STORED_TIMEOUT
- mtype_is_filled((const struct mtype_elem *)x) &&
+ mtype_is_filled(x) &&
#endif
ip_set_timeout_expired(ext_timeout(x, set))))
continue;
}
if (mtype_do_list(skb, map, id, set->dsize))
goto nla_put_failure;
- if (ip_set_put_extensions(skb, set, x,
- mtype_is_filled((const struct mtype_elem *)x)))
+ if (ip_set_put_extensions(skb, set, x, mtype_is_filled(x)))
goto nla_put_failure;
ipset_nest_end(skb, nested);
}
ret = -EFAULT;
goto done;
}
- op = (unsigned int *)data;
+ op = data;
if (*op < IP_SET_OP_VERSION) {
/* Check the version at the beginning of operations */
static int kill_l4proto(struct nf_conn *i, void *data)
{
struct nf_conntrack_l4proto *l4proto;
- l4proto = (struct nf_conntrack_l4proto *)data;
+ l4proto = data;
return nf_ct_protonum(i) == l4proto->l4proto &&
nf_ct_l3num(i) == l4proto->l3proto;
}
static void nft_hash_elem_destroy(void *ptr, void *arg)
{
- nft_set_elem_destroy((const struct nft_set *)arg, ptr, true);
+ nft_set_elem_destroy(arg, ptr, true);
}
static void nft_hash_destroy(const struct nft_set *set)
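The same implicit conversion applies when a void pointer is handed to a function parameter of a concrete pointer type, which is what the mtype_is_filled(x) and nft_set_elem_destroy(arg, ...) hunks rely on. A small userspace sketch; struct set and destroy_cb here are illustrative names, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct set {
	const char *name;
};

static void set_report(const struct set *s)
{
	printf("destroying an element of set %s\n", s->name);
}

/* Untyped callback context, mirroring the (void *ptr, void *arg) destructors. */
static void destroy_cb(void *ptr, void *arg)
{
	set_report(arg);	/* void * converts to const struct set *, no cast */
	free(ptr);
}

int main(void)
{
	struct set s = { .name = "demo" };

	destroy_cb(malloc(16), &s);
	return 0;
}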
cfg_copy(struct hashlimit_cfg2 *to, void *from, int revision)
{
if (revision == 1) {
- struct hashlimit_cfg1 *cfg = (struct hashlimit_cfg1 *)from;
+ struct hashlimit_cfg1 *cfg = from;
to->mode = cfg->mode;
to->avg = cfg->avg;
static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct xt_hashlimit_htable *htable = s->private;
- unsigned int *bucket = (unsigned int *)v;
+ unsigned int *bucket = v;
*pos = ++(*bucket);
if (*pos >= htable->cfg.size) {
__releases(htable->lock)
{
struct xt_hashlimit_htable *htable = s->private;
- unsigned int *bucket = (unsigned int *)v;
+ unsigned int *bucket = v;
if (!IS_ERR(bucket))
kfree(bucket);
static int dl_seq_show_v1(struct seq_file *s, void *v)
{
struct xt_hashlimit_htable *htable = s->private;
- unsigned int *bucket = (unsigned int *)v;
+ unsigned int *bucket = v;
struct dsthash_ent *ent;
if (!hlist_empty(&htable->hash[*bucket])) {
static int dl_seq_show(struct seq_file *s, void *v)
{
struct xt_hashlimit_htable *htable = s->private;
- unsigned int *bucket = (unsigned int *)v;
+ unsigned int *bucket = v;
struct dsthash_ent *ent;
if (!hlist_empty(&htable->hash[*bucket])) {