Revision 613131396262 () - Diff

Link to this snippet: https://friendpaste.com/6XrGXb5p0RSJGixUFZ8lCt
Embed:
[extraction artifact removed: the numbers 1–49 on the following lines were the paste viewer's line-number gutter, spilled into the text during page extraction; they are not part of the snippet content]
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 1da768e5ef75..7a0b740ed4ca 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -86,10 +86,26 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
- BUG_ON(!atomic_read(&eb->blocking_writers) &&
- current->pid == eb->lock_owner);
+ /*
+ * If we are already holding a spinning write lock, allow an additional
+ * spinning read lock. This is needed to solve deadlocks when updating
+ * free space cache inode items, as when COWing a tree root extent
+ * buffer, when calling extent-tree.c:find_free_extent(), we may find
+ * another block group which does not have its space cache loaded and
+ * then attempt to load it, which requires search for its free space
+ * cache inode item in the tree root which could be located in the
+ * extent buffer that we previously write locked.
+ */
+ if (read_trylock(&eb->lock)) {
+ goto got_read_lock;
+ } else if (eb->lock_owner == current->pid) {
+ BUG_ON(eb->lock_nested);
+ eb->lock_nested = 1;
+ return;
+ }
read_lock(&eb->lock);
+got_read_lock:
if (atomic_read(&eb->blocking_writers) &&
current->pid == eb->lock_owner) {
/*
@@ -142,8 +158,14 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
if (atomic_read(&eb->blocking_writers))
return 0;
- if (!read_trylock(&eb->lock))
+ if (!read_trylock(&eb->lock)) {
+ if (eb->lock_owner == current->pid) {
+ BUG_ON(eb->lock_nested);
+ eb->lock_nested = 1;
+ return 1;
+ }
return 0;
+ }
if (atomic_read(&eb->blocking_writers)) {
read_unlock(&eb->lock);