 	get_random_bytes(&f->rnd, sizeof(u32));
 	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
 		struct inet_frag_queue *q;
-		struct hlist_node *p, *n;
+		struct hlist_node *n;
 
-		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
+		hlist_for_each_entry_safe(q, n, &f->hash[i], list) {
 			unsigned int hval = f->hashfn(q);
 
 			if (hval != i) {
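Note why n survives here while p disappears: the old _safe iterator needed two struct hlist_node * cursors, one for the current position and one caching its successor, whereas the new one types the cursor q as the entry itself. It still needs n, though, so the loop body can hlist_del() the current entry (as this rebuild path does when relinking queues to their new chains) without losing its place. A sketch of the post-change macros, modeled on <linux/list.h> around this API change and not quoted verbatim from any one kernel version; hlist_entry() is the kernel's container_of() wrapper for hlist nodes:

	/* NULL-tolerant entry lookup: maps a node pointer (possibly NULL)
	 * to its enclosing entry, or to NULL at the end of the chain. */
	#define hlist_entry_safe(ptr, type, member) \
		({ typeof(ptr) ____ptr = (ptr); \
		   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
		})

	/* Deletion-safe walk: "pos" is the entry, "n" caches the next
	 * node before the body runs, so hlist_del(&pos->member) is OK. */
	#define hlist_for_each_entry_safe(pos, n, head, member) \
		for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
		     pos && ({ n = pos->member.next; 1; }); \
		     pos = hlist_entry_safe(n, typeof(*pos), member))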
 {
 	struct inet_frag_queue *qp;
-#ifdef CONFIG_SMP
-	struct hlist_node *n;
-#endif
 	unsigned int hash;
 	 * such entry could be created on other cpu, while we
 	 * promoted read lock to write lock.
 	 */
-	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
+	hlist_for_each_entry(qp, &f->hash[hash], list) {
 		if (qp->net == nf && f->match(qp, arg)) {
 			atomic_inc(&qp->refcnt);
 			write_unlock(&f->lock);
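The plain iterator changes the same way: the old form took a (tpos, pos, head, member) quadruple, walking pos as a raw struct hlist_node * and deriving the entry into tpos on each step, while the new form drops the node cursor entirely and folds the container_of() step into hlist_entry_safe(). Sketches of both shapes for side-by-side reading; the _old suffix is ours, and neither is verbatim kernel source:

	/* Pre-change shape: caller supplies entry cursor AND node cursor. */
	#define hlist_for_each_entry_old(tpos, pos, head, member) \
		for (pos = (head)->first; \
		     pos && ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
		     pos = pos->next)

	/* Post-change shape: the node cursor is gone; "pos" is the entry. */
	#define hlist_for_each_entry(pos, head, member) \
		for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
		     pos; \
		     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))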
 	__releases(&f->lock)
 {
 	struct inet_frag_queue *q;
-	struct hlist_node *n;
 
-	hlist_for_each_entry(q, n, &f->hash[hash], list) {
+	hlist_for_each_entry(q, &f->hash[hash], list) {
 		if (q->net == nf && f->match(q, key)) {
 			atomic_inc(&q->refcnt);
 			read_unlock(&f->lock);
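To see the new calling convention end to end, here is a self-contained userspace sketch. The types and helpers imitate (and simplify) the kernel's hlist API so the example builds with plain GCC outside the kernel tree; struct item and its fields are ours, invented for illustration:

	#include <stdio.h>
	#include <stddef.h>

	struct hlist_node { struct hlist_node *next, **pprev; };
	struct hlist_head { struct hlist_node *first; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* NULL-tolerant container_of(), as sketched earlier. */
	#define hlist_entry_safe(ptr, type, member) \
		({ typeof(ptr) ____ptr = (ptr); \
		   ____ptr ? container_of(____ptr, type, member) : NULL; })

	/* New-style iterator: the cursor is the entry, no node variable. */
	#define hlist_for_each_entry(pos, head, member) \
		for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
		     pos; \
		     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

	struct item {
		int val;
		struct hlist_node list;
	};

	static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
	{
		n->next = h->first;
		if (h->first)
			h->first->pprev = &n->next;
		h->first = n;
		n->pprev = &h->first;
	}

	int main(void)
	{
		struct hlist_head head = { .first = NULL };
		struct item a = { .val = 1 }, b = { .val = 2 };
		struct item *pos;

		hlist_add_head(&a.list, &head);
		hlist_add_head(&b.list, &head);

		hlist_for_each_entry(pos, &head, list)
			printf("val=%d\n", pos->val);	/* prints 2, then 1 */
		return 0;
	}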