@@ fs/btrfs/extent_io.c: __alloc_extent_buffer() @@
atomic_set(&eb->read_locks, 0);
atomic_set(&eb->blocking_readers, 0);
atomic_set(&eb->blocking_writers, 0);
- atomic_set(&eb->spinning_readers, 0);
eb->lock_nested = 0;
init_waitqueue_head(&eb->write_lock_wq);
init_waitqueue_head(&eb->read_lock_wq);
#ifdef CONFIG_BTRFS_DEBUG
atomic_set(&eb->spinning_writers, 0);
+ atomic_set(&eb->spinning_readers, 0);
#endif
return eb;
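
The net effect of the hunk above on __alloc_extent_buffer() is that
spinning_readers is no longer initialized unconditionally; both spinning
counters are now set up together, and only in debug builds. A sketch of
the resulting code (unrelated initialization between the two fragments
elided):

	atomic_set(&eb->read_locks, 0);
	atomic_set(&eb->blocking_readers, 0);
	atomic_set(&eb->blocking_writers, 0);
	eb->lock_nested = 0;
	init_waitqueue_head(&eb->write_lock_wq);
	init_waitqueue_head(&eb->read_lock_wq);

	/* ... unrelated initialization elided ... */

#ifdef CONFIG_BTRFS_DEBUG
	/* lock accounting used only by the debug assertions */
	atomic_set(&eb->spinning_writers, 0);
	atomic_set(&eb->spinning_readers, 0);
#endif
	return eb;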

@@ fs/btrfs/extent_io.h: struct extent_buffer @@
atomic_t read_locks;
atomic_t blocking_writers;
atomic_t blocking_readers;
- atomic_t spinning_readers;
short lock_nested;
/* >= 0 if eb belongs to a log tree, -1 otherwise */
short log_index;
struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
atomic_t spinning_writers;
+ atomic_t spinning_readers;
struct list_head leak_list;
#endif
};
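
The btrfs_assert_spinning_readers_get()/_put() calls used by the hunks
below are not defined in this excerpt. Presumably they live in
fs/btrfs/locking.c and follow the same pattern as the earlier
spinning_writers helpers: real counter updates plus a WARN_ON under
CONFIG_BTRFS_DEBUG, and empty stubs otherwise, so non-debug builds pay
no atomic operations for the accounting. A sketch under that assumption:

#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}
#else
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
#endif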

@@ fs/btrfs/locking.c: btrfs_set_lock_blocking_read() @@
return;
btrfs_assert_tree_read_locked(eb);
atomic_inc(&eb->blocking_readers);
- WARN_ON(atomic_read(&eb->spinning_readers) == 0);
- atomic_dec(&eb->spinning_readers);
+ btrfs_assert_spinning_readers_put(eb);
read_unlock(&eb->lock);
}
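
btrfs_assert_tree_read_locked() above is a pre-existing helper in
fs/btrfs/locking.c, not part of this change; for context, it is
essentially a BUG_ON on the read_locks counter (a sketch from the
surrounding code, not part of the diff):

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}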

@@ fs/btrfs/locking.c: btrfs_clear_lock_blocking_read() @@
return;
BUG_ON(atomic_read(&eb->blocking_readers) == 0);
read_lock(&eb->lock);
- atomic_inc(&eb->spinning_readers);
+ btrfs_assert_spinning_readers_get(eb);
/* atomic_dec_and_test implies a barrier */
if (atomic_dec_and_test(&eb->blocking_readers))
cond_wake_up_nomb(&eb->read_lock_wq);

@@ fs/btrfs/locking.c: btrfs_tree_read_lock() @@
goto again;
}
atomic_inc(&eb->read_locks);
- atomic_inc(&eb->spinning_readers);
+ btrfs_assert_spinning_readers_get(eb);
}

@@ fs/btrfs/locking.c: btrfs_tree_read_lock_atomic() @@
return 0;
}
atomic_inc(&eb->read_locks);
- atomic_inc(&eb->spinning_readers);
+ btrfs_assert_spinning_readers_get(eb);
return 1;
}

@@ fs/btrfs/locking.c: btrfs_try_tree_read_lock() @@
return 0;
}
atomic_inc(&eb->read_locks);
- atomic_inc(&eb->spinning_readers);
+ btrfs_assert_spinning_readers_get(eb);
return 1;
}

@@ fs/btrfs/locking.c: btrfs_tree_read_unlock() @@
return;
}
btrfs_assert_tree_read_locked(eb);
- WARN_ON(atomic_read(&eb->spinning_readers) == 0);
- atomic_dec(&eb->spinning_readers);
+ btrfs_assert_spinning_readers_put(eb);
atomic_dec(&eb->read_locks);
read_unlock(&eb->lock);
}
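
Taken together, the change keeps the spinning/blocking read accounting
as debug-only assertions while leaving the locking logic untouched. A
sketch of the read-lock lifecycle these hunks instrument (demo_read_side
is a hypothetical caller, assuming the usual btrfs lock/blocking
pattern):

static void demo_read_side(struct extent_buffer *eb)
{
	btrfs_tree_read_lock(eb);		/* spinning_readers++ (debug builds) */
	btrfs_set_lock_blocking_read(eb);	/* spinning_readers--, blocking_readers++ */

	/* ... work that may sleep; the rwlock itself is not held here ... */

	btrfs_clear_lock_blocking_read(eb);	/* spinning_readers++, blocking_readers-- */
	btrfs_tree_read_unlock(eb);		/* spinning_readers--, drops the read lock */
}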