/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_reserved_extents(struct btrfs_root *root,
                                   u64 bytenr, u64 num, int reserve);
static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc,
                              int mark_free);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);

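/*
 * Returns non-zero once free space caching for this block group has
 * finished.  The barrier pairs with the store to cache->cached so we
 * read an up-to-date value.
 */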
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

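/* return non-zero if the block group's flags contain all of the given bits */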
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0,
 * else it will return the block group that contains the given bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                atomic_inc(&ret->count);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

/*
 * We always set EXTENT_LOCKED for the super mirror extents so we don't
 * overwrite them, so those bits need to be unset.  Also, if we are
 * unmounting with pinned extents still sitting there because a block
 * group was still caching, we need to clear those now, since we are done.
 */
void btrfs_free_pinned_extents(struct btrfs_fs_info *info)
{
        u64 start, end, last = 0;
        int ret;

        while (1) {
                ret = find_first_extent_bit(&info->pinned_extents, last,
                                            &start, &end,
                                            EXTENT_LOCKED|EXTENT_DIRTY);
                if (ret)
                        break;

                clear_extent_bits(&info->pinned_extents, start, end,
                                  EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS);
                last = end + 1;
        }
}

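/*
 * Mark the extents backing the super block mirrors inside this block
 * group as locked in pinned_extents so the allocator never hands them
 * out.  The stripe mapping comes from btrfs_rmap_block().
 */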
static int remove_sb_from_cache(struct btrfs_root *root,
                                struct btrfs_block_group_cache *cache)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);
                while (nr--) {
                        try_lock_extent(&fs_info->pinned_extents,
                                        logical[nr],
                                        logical[nr] + stripe_len - 1, GFP_NOFS);
                }
                kfree(logical);
        }

        return 0;
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, as their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(&info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY|EXTENT_LOCKED);
                if (ret)
                        break;

                if (extent_start == start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}

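/*
 * Kthread worker: walk the extent tree via the commit root and record
 * the holes between extent items as free space for the block group,
 * waking any waiters as space is found.
 */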
static int caching_kthread(void *data)
{
        struct btrfs_block_group_cache *block_group = data;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        u64 last = 0;
        struct btrfs_path *path;
        int ret = 0;
        struct btrfs_key key;
        struct extent_buffer *leaf;
        int slot;
        u64 total_found = 0;

        BUG_ON(!fs_info);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        atomic_inc(&block_group->space_info->caching_threads);
        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 2;

        key.objectid = last;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
again:
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        while (1) {
                smp_mb();
                if (block_group->fs_info->closing > 1) {
                        last = (u64)-1;
                        break;
                }

                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(fs_info->extent_root, path);
                        if (ret < 0)
                                goto err;
                        else if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_transaction_in_commit(fs_info)) {
                                leaf = path->nodes[0];

                                /* this shouldn't happen, but if the
                                 * leaf is empty just move on.
                                 */
                                if (btrfs_header_nritems(leaf) == 0)
                                        break;
                                /*
                                 * we need to copy the key out so that
                                 * we are sure the next search advances
                                 * us forward in the btree.
                                 */
                                btrfs_item_key_to_cpu(leaf, &key, 0);
                                btrfs_release_path(fs_info->extent_root, path);
                                up_read(&fs_info->extent_commit_sem);
                                schedule_timeout(1);
                                goto again;
                        }

                        continue;
                }
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid < block_group->key.objectid)
                        goto next;

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;
                }

                if (total_found > (1024 * 1024 * 2)) {
                        total_found = 0;
                        wake_up(&block_group->caching_q);
                }
next:
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);

        spin_lock(&block_group->lock);
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);
        atomic_dec(&block_group->space_info->caching_threads);
        wake_up(&block_group->caching_q);

        return 0;
}

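/*
 * Start the async caching kthread for this block group, unless caching
 * has already been started or has finished.
 */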
static int cache_block_group(struct btrfs_block_group_cache *cache)
{
        struct task_struct *tsk;
        int ret = 0;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                return ret;
        }
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);

        tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu",
                          cache->key.objectid);
        if (IS_ERR(tsk)) {
                ret = PTR_ERR(tsk);
                printk(KERN_ERR "error running thread %d\n", ret);
                BUG();
        }

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

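/* drop a reference on the block group, freeing it when the count hits zero */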
void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count))
                kfree(cache);
}

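/*
 * Find the space_info whose flags exactly match the given allocation
 * flags; returns NULL if none exists yet.
 */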
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags == flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

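/* return num scaled by factor/10; a factor of 10 returns num unchanged */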
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

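/*
 * Find a metadata block group that is no more than factor/10 full,
 * starting at search_hint and wrapping around to search_start.  If
 * nothing qualifies, the criteria are loosened (read-only groups
 * included, factor raised to 10) and the scan is retried.
 */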
u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  The full back ref is actually generic, and
 * can be used in all cases where the implicit back ref is used.  The major
 * shortcoming of the full back ref is its overhead.  Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for the
 * pointers in the block.  Remove these full back refs, add implicit back
 * refs for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * the pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * The meaning of the key offset differs between types of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required.  This information is stored
 * in the tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(root, path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);
        BUG_ON(ret);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

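/*
 * Hash (root objectid, inode objectid, file offset) into the 64-bit key
 * offset used for implicit data back refs: crc32c of the root objectid
 * supplies the high bits, crc32c of owner and offset the low bits.  For
 * example (illustrative values only), a ref held by inode 257 at file
 * offset 0 in subvolume 5 would be keyed at hash_extent_data_ref(5, 257, 0).
 */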
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

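/* return 1 if the on-disk data ref matches the given root/owner/offset */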
static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

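/*
 * Find the data back ref item for an extent.  Shared refs are keyed
 * directly by parent; implicit refs are keyed by hash, so on collision
 * we walk forward comparing candidates with match_extent_data_ref(),
 * retrying from scratch if the leaf changed under us (recow).
 */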
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(root, path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(root, path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

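/*
 * Insert a new data back ref item, or bump the count on an existing
 * one.  Hash collisions for implicit refs are resolved by probing at
 * key.offset + 1 until a free slot or the matching ref is found.
 */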
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(root, path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(root, path);
        return ret;
}

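/*
 * Drop refs_to_drop references from the data back ref item the path
 * points to, deleting the item once its count reaches zero.
 */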
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

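/*
 * Return the reference count held in a data back ref, handling the
 * inline form, the standalone item, and the old v0 format.
 */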
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

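/*
 * Find the back ref item for a tree block, keyed by parent block for
 * shared refs or by root objectid otherwise.  Returns -ENOENT if no
 * such item exists.
 */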
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(root, path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

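/*
 * Insert a tree block back ref item.  The key alone carries all the
 * needed information, so the item body is empty.
 */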
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(root, path);
        return ret;
}

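/*
 * Pick the back ref key type: owners below BTRFS_FIRST_FREE_OBJECTID
 * are tree blocks, everything else is data; a non-zero parent selects
 * the shared (full back ref) variant.
 */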
static inline int extent_ref_type(u64 parent, u64 owner)
{
        int type;
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                if (parent > 0)
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                else
                        type = BTRFS_TREE_BLOCK_REF_KEY;
        } else {
                if (parent > 0)
                        type = BTRFS_SHARED_DATA_REF_KEY;
                else
                        type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        return type;
}

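/*
 * Starting at the given level, find the first node in the path with a
 * slot after the current one and copy out the key there.  Returns 1
 * when there is no next key.
 */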
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
{
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                        break;
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        continue;
                if (level == 0)
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                else
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                return 0;
        }
        return 1;
}

/*
 * Look for an inline back ref.  If the back ref is found, *ref_ret is
 * set to the address of the inline back ref, and 0 is returned.
 *
 * If the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * If insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *       items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes,
                                 u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int insert)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
        u64 flags;
        u64 item_size;
        unsigned long ptr;
        unsigned long end;
        int extra_size;
        int type;
        int want;
        int ret;
        int err = 0;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;

        want = extent_ref_type(parent, owner);
        if (insert) {
                extra_size = btrfs_extent_inline_ref_size(want);
                path->keep_locks = 1;
        } else
                extra_size = -1;
        ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
        if (ret < 0) {
                err = ret;
                goto out;
        }
        BUG_ON(ret);

        leaf = path->nodes[0];
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                if (!insert) {
                        err = -ENOENT;
                        goto out;
                }
                ret = convert_extent_item_v0(trans, root, path, owner,
                                             extra_size);
                if (ret < 0) {
                        err = ret;
                        goto out;
                }
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
        }
#endif
        BUG_ON(item_size < sizeof(*ei));

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);

        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        } else {
                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
        }

        err = -ENOENT;
        while (1) {
                if (ptr >= end) {
                        WARN_ON(ptr > end);
                        break;
                }
                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_extent_inline_ref_type(leaf, iref);
                if (want < type)
                        break;
                if (want > type) {
                        ptr += btrfs_extent_inline_ref_size(type);
                        continue;
                }

                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                        struct btrfs_extent_data_ref *dref;
                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        if (match_extent_data_ref(leaf, dref, root_objectid,
                                                  owner, offset)) {
                                err = 0;
                                break;
                        }
                        if (hash_extent_data_ref_item(leaf, dref) <
                            hash_extent_data_ref(root_objectid, owner, offset))
                                break;
                } else {
                        u64 ref_offset;
                        ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
                        if (parent > 0) {
                                if (parent == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < parent)
                                        break;
                        } else {
                                if (root_objectid == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < root_objectid)
                                        break;
                        }
                }
                ptr += btrfs_extent_inline_ref_size(type);
        }
        if (err == -ENOENT && insert) {
                if (item_size + extra_size >=
                    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
                        err = -EAGAIN;
                        goto out;
                }
                /*
                 * To add a new inline back ref, we have to make sure
                 * there is no corresponding back ref item.
                 * For simplicity, we just do not add a new inline back
                 * ref if there is any kind of item for this block.
                 */
                if (find_next_key(path, 0, &key) == 0 &&
                    key.objectid == bytenr &&
                    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        err = -EAGAIN;
                        goto out;
                }
        }
        *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
        if (insert) {
                path->keep_locks = 0;
                btrfs_unlock_up_safe(path, 1);
        }
        return err;
}

/*
 * helper to add a new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_path *path,
                                struct btrfs_extent_inline_ref *iref,
                                u64 parent, u64 root_objectid,
                                u64 owner, u64 offset, int refs_to_add,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        unsigned long ptr;
        unsigned long end;
        unsigned long item_offset;
        u64 refs;
        int size;
        int type;
        int ret;

        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        item_offset = (unsigned long)iref - (unsigned long)ei;

        type = extent_ref_type(parent, owner);
        size = btrfs_extent_inline_ref_size(type);

        ret = btrfs_extend_item(trans, root, path, size);
        BUG_ON(ret);

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        refs += refs_to_add;
        btrfs_set_extent_refs(leaf, ei, refs);
        if (extent_op)
                __run_delayed_extent_op(extent_op, leaf, ei);

        ptr = (unsigned long)ei + item_offset;
        end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
        if (ptr < end - size)
                memmove_extent_buffer(leaf, ptr + size, ptr,
                                      end - size - ptr);

        iref = (struct btrfs_extent_inline_ref *)ptr;
        btrfs_set_extent_inline_ref_type(leaf, iref, type);
        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                struct btrfs_extent_data_ref *dref;
                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
                btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
                btrfs_set_extent_data_ref_offset(leaf, dref, offset);
                btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
                struct btrfs_shared_data_ref *sref;
                sref = (struct btrfs_shared_data_ref *)(iref + 1);
                btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
        } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
        } else {
                btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}

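/*
 * Look up a back ref, trying the inline form first and falling back to
 * a standalone back ref item (tree block or data, as appropriate).  On
 * success the path points at the ref.
 */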
1346 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1347                                  struct btrfs_root *root,
1348                                  struct btrfs_path *path,
1349                                  struct btrfs_extent_inline_ref **ref_ret,
1350                                  u64 bytenr, u64 num_bytes, u64 parent,
1351                                  u64 root_objectid, u64 owner, u64 offset)
1352 {
1353         int ret;
1354
1355         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1356                                            bytenr, num_bytes, parent,
1357                                            root_objectid, owner, offset, 0);
1358         if (ret != -ENOENT)
1359                 return ret;
1360
1361         btrfs_release_path(root, path);
1362         *ref_ret = NULL;
1363
1364         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1365                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1366                                             root_objectid);
1367         } else {
1368                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1369                                              root_objectid, owner, offset);
1370         }
1371         return ret;
1372 }
1373
1374 /*
1375  * helper to update/remove inline back ref
1376  */
1377 static noinline_for_stack
1378 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1379                                  struct btrfs_root *root,
1380                                  struct btrfs_path *path,
1381                                  struct btrfs_extent_inline_ref *iref,
1382                                  int refs_to_mod,
1383                                  struct btrfs_delayed_extent_op *extent_op)
1384 {
1385         struct extent_buffer *leaf;
1386         struct btrfs_extent_item *ei;
1387         struct btrfs_extent_data_ref *dref = NULL;
1388         struct btrfs_shared_data_ref *sref = NULL;
1389         unsigned long ptr;
1390         unsigned long end;
1391         u32 item_size;
1392         int size;
1393         int type;
1394         int ret;
1395         u64 refs;
1396
1397         leaf = path->nodes[0];
1398         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1399         refs = btrfs_extent_refs(leaf, ei);
1400         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1401         refs += refs_to_mod;
1402         btrfs_set_extent_refs(leaf, ei, refs);
1403         if (extent_op)
1404                 __run_delayed_extent_op(extent_op, leaf, ei);
1405
1406         type = btrfs_extent_inline_ref_type(leaf, iref);
1407
1408         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1409                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1410                 refs = btrfs_extent_data_ref_count(leaf, dref);
1411         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1412                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1413                 refs = btrfs_shared_data_ref_count(leaf, sref);
1414         } else {
1415                 refs = 1;
1416                 BUG_ON(refs_to_mod != -1);
1417         }
1418
1419         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1420         refs += refs_to_mod;
1421
1422         if (refs > 0) {
1423                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1424                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1425                 else
1426                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1427         } else {
1428         size = btrfs_extent_inline_ref_size(type);
1429                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1430                 ptr = (unsigned long)iref;
1431                 end = (unsigned long)ei + item_size;
1432                 if (ptr + size < end)
1433                         memmove_extent_buffer(leaf, ptr, ptr + size,
1434                                               end - ptr - size);
1435                 item_size -= size;
1436                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1437                 BUG_ON(ret);
1438         }
1439         btrfs_mark_buffer_dirty(leaf);
1440         return 0;
1441 }
1442
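/*
 * insert a new inline backref or bump the count of an existing one.
 * a return of 0 from the lookup means the ref already exists and is
 * updated in place; -ENOENT means the path is positioned so
 * setup_inline_extent_backref() can add it.  anything else, including
 * -EAGAIN when the inline form cannot be used, is passed up so the
 * caller can fall back to a separate keyed backref item
 */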
1443 static noinline_for_stack
1444 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1445                                  struct btrfs_root *root,
1446                                  struct btrfs_path *path,
1447                                  u64 bytenr, u64 num_bytes, u64 parent,
1448                                  u64 root_objectid, u64 owner,
1449                                  u64 offset, int refs_to_add,
1450                                  struct btrfs_delayed_extent_op *extent_op)
1451 {
1452         struct btrfs_extent_inline_ref *iref;
1453         int ret;
1454
1455         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1456                                            bytenr, num_bytes, parent,
1457                                            root_objectid, owner, offset, 1);
1458         if (ret == 0) {
1459                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1460                 ret = update_inline_extent_backref(trans, root, path, iref,
1461                                                    refs_to_add, extent_op);
1462         } else if (ret == -ENOENT) {
1463                 ret = setup_inline_extent_backref(trans, root, path, iref,
1464                                                   parent, root_objectid,
1465                                                   owner, offset, refs_to_add,
1466                                                   extent_op);
1467         }
1468         return ret;
1469 }
1470
1471 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1472                                  struct btrfs_root *root,
1473                                  struct btrfs_path *path,
1474                                  u64 bytenr, u64 parent, u64 root_objectid,
1475                                  u64 owner, u64 offset, int refs_to_add)
1476 {
1477         int ret;
1478         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1479                 BUG_ON(refs_to_add != 1);
1480                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1481                                             parent, root_objectid);
1482         } else {
1483                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1484                                              parent, root_objectid,
1485                                              owner, offset, refs_to_add);
1486         }
1487         return ret;
1488 }
1489
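/*
 * helper to drop backref(s) from an extent: an inline ref is updated or
 * shrunk in place; otherwise the keyed data ref item is updated, or the
 * keyed tree block ref item (which always carries a single ref) is
 * deleted outright
 */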
1490 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1491                                  struct btrfs_root *root,
1492                                  struct btrfs_path *path,
1493                                  struct btrfs_extent_inline_ref *iref,
1494                                  int refs_to_drop, int is_data)
1495 {
1496         int ret;
1497
1498         BUG_ON(!is_data && refs_to_drop != 1);
1499         if (iref) {
1500                 ret = update_inline_extent_backref(trans, root, path, iref,
1501                                                    -refs_to_drop, NULL);
1502         } else if (is_data) {
1503                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1504         } else {
1505                 ret = btrfs_del_item(trans, root, path);
1506         }
1507         return ret;
1508 }
1509
1510 #ifdef BIO_RW_DISCARD
1511 static void btrfs_issue_discard(struct block_device *bdev,
1512                                 u64 start, u64 len)
1513 {
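        /* the block layer takes 512-byte sector units, hence the >> 9 */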
1514         blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
1515 }
1516 #endif
1517
1518 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1519                                 u64 num_bytes)
1520 {
1521 #ifdef BIO_RW_DISCARD
1522         int ret;
1523         u64 map_length = num_bytes;
1524         struct btrfs_multi_bio *multi = NULL;
1525
1526         /* Tell the block device(s) that the sectors can be discarded */
1527         ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1528                               bytenr, &map_length, &multi, 0);
1529         if (!ret) {
1530                 struct btrfs_bio_stripe *stripe = multi->stripes;
1531                 int i;
1532
1533                 if (map_length > num_bytes)
1534                         map_length = num_bytes;
1535
1536                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1537                         btrfs_issue_discard(stripe->dev->bdev,
1538                                             stripe->physical,
1539                                             map_length);
1540                 }
1541                 kfree(multi);
1542         }
1543
1544         return ret;
1545 #else
1546         return 0;
1547 #endif
1548 }
1549
1550 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1551                          struct btrfs_root *root,
1552                          u64 bytenr, u64 num_bytes, u64 parent,
1553                          u64 root_objectid, u64 owner, u64 offset)
1554 {
1555         int ret;
1556         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1557                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1558
1559         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1560                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1561                                         parent, root_objectid, (int)owner,
1562                                         BTRFS_ADD_DELAYED_REF, NULL);
1563         } else {
1564                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1565                                         parent, root_objectid, owner, offset,
1566                                         BTRFS_ADD_DELAYED_REF, NULL);
1567         }
1568         return ret;
1569 }
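/*
 * Editor's note: a minimal usage sketch, not part of the original file;
 * disk_bytenr, disk_num_bytes, ino and file_pos are hypothetical names.
 * a caller that makes a file share an existing data extent only queues
 * a delayed ref here, e.g.:
 *
 *      ret = btrfs_inc_extent_ref(trans, root, disk_bytenr, disk_num_bytes,
 *                                 0 (no parent: keyed ref),
 *                                 root->root_key.objectid, ino, file_pos);
 *
 * nothing touches the extent tree until btrfs_run_delayed_refs() runs
 */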
1570
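/*
 * worker for the delayed ref path: try to record the new ref inline in
 * the extent item; if that returns -EAGAIN, bump the extent item's ref
 * count directly and insert a separate keyed backref item instead
 */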
1571 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1572                                   struct btrfs_root *root,
1573                                   u64 bytenr, u64 num_bytes,
1574                                   u64 parent, u64 root_objectid,
1575                                   u64 owner, u64 offset, int refs_to_add,
1576                                   struct btrfs_delayed_extent_op *extent_op)
1577 {
1578         struct btrfs_path *path;
1579         struct extent_buffer *leaf;
1580         struct btrfs_extent_item *item;
1581         u64 refs;
1582         int ret;
1583         int err = 0;
1584
1585         path = btrfs_alloc_path();
1586         if (!path)
1587                 return -ENOMEM;
1588
1589         path->reada = 1;
1590         path->leave_spinning = 1;
1591         /* this will set up the path even if it fails to insert the back ref */
1592         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1593                                            path, bytenr, num_bytes, parent,
1594                                            root_objectid, owner, offset,
1595                                            refs_to_add, extent_op);
1596         if (ret == 0)
1597                 goto out;
1598
1599         if (ret != -EAGAIN) {
1600                 err = ret;
1601                 goto out;
1602         }
1603
1604         leaf = path->nodes[0];
1605         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1606         refs = btrfs_extent_refs(leaf, item);
1607         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1608         if (extent_op)
1609                 __run_delayed_extent_op(extent_op, leaf, item);
1610
1611         btrfs_mark_buffer_dirty(leaf);
1612         btrfs_release_path(root->fs_info->extent_root, path);
1613
1614         path->reada = 1;
1615         path->leave_spinning = 1;
1616
1617         /* now insert the actual backref */
1618         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1619                                     path, bytenr, parent, root_objectid,
1620                                     owner, offset, refs_to_add);
1621         BUG_ON(ret);
1622 out:
1623         btrfs_free_path(path);
1624         return err;
1625 }
1626
1627 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1628                                 struct btrfs_root *root,
1629                                 struct btrfs_delayed_ref_node *node,
1630                                 struct btrfs_delayed_extent_op *extent_op,
1631                                 int insert_reserved)
1632 {
1633         int ret = 0;
1634         struct btrfs_delayed_data_ref *ref;
1635         struct btrfs_key ins;
1636         u64 parent = 0;
1637         u64 ref_root = 0;
1638         u64 flags = 0;
1639
1640         ins.objectid = node->bytenr;
1641         ins.offset = node->num_bytes;
1642         ins.type = BTRFS_EXTENT_ITEM_KEY;
1643
1644         ref = btrfs_delayed_node_to_data_ref(node);
1645         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1646                 parent = ref->parent;
1647         else
1648                 ref_root = ref->root;
1649
1650         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1651                 if (extent_op) {
1652                         BUG_ON(extent_op->update_key);
1653                         flags |= extent_op->flags_to_set;
1654                 }
1655                 ret = alloc_reserved_file_extent(trans, root,
1656                                                  parent, ref_root, flags,
1657                                                  ref->objectid, ref->offset,
1658                                                  &ins, node->ref_mod);
1659                 update_reserved_extents(root, ins.objectid, ins.offset, 0);
1660         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1661                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1662                                              node->num_bytes, parent,
1663                                              ref_root, ref->objectid,
1664                                              ref->offset, node->ref_mod,
1665                                              extent_op);
1666         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1667                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1668                                           node->num_bytes, parent,
1669                                           ref_root, ref->objectid,
1670                                           ref->offset, node->ref_mod,
1671                                           extent_op);
1672         } else {
1673                 BUG();
1674         }
1675         return ret;
1676 }
1677
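/*
 * apply a delayed extent_op to an extent item that is already mapped:
 * or the new flags into the item and, for tree blocks, update the key
 * stored in the btrfs_tree_block_info that follows it
 */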
1678 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1679                                     struct extent_buffer *leaf,
1680                                     struct btrfs_extent_item *ei)
1681 {
1682         u64 flags = btrfs_extent_flags(leaf, ei);
1683         if (extent_op->update_flags) {
1684                 flags |= extent_op->flags_to_set;
1685                 btrfs_set_extent_flags(leaf, ei, flags);
1686         }
1687
1688         if (extent_op->update_key) {
1689                 struct btrfs_tree_block_info *bi;
1690                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1691                 bi = (struct btrfs_tree_block_info *)(ei + 1);
1692                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1693         }
1694 }
1695
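/*
 * apply a pending flag and/or key update to the extent item itself.
 * the item is located by (bytenr, BTRFS_EXTENT_ITEM_KEY, num_bytes);
 * a search miss here means the extent record is gone, hence -EIO
 */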
1696 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1697                                  struct btrfs_root *root,
1698                                  struct btrfs_delayed_ref_node *node,
1699                                  struct btrfs_delayed_extent_op *extent_op)
1700 {
1701         struct btrfs_key key;
1702         struct btrfs_path *path;
1703         struct btrfs_extent_item *ei;
1704         struct extent_buffer *leaf;
1705         u32 item_size;
1706         int ret;
1707         int err = 0;
1708
1709         path = btrfs_alloc_path();
1710         if (!path)
1711                 return -ENOMEM;
1712
1713         key.objectid = node->bytenr;
1714         key.type = BTRFS_EXTENT_ITEM_KEY;
1715         key.offset = node->num_bytes;
1716
1717         path->reada = 1;
1718         path->leave_spinning = 1;
1719         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1720                                 path, 0, 1);
1721         if (ret < 0) {
1722                 err = ret;
1723                 goto out;
1724         }
1725         if (ret > 0) {
1726                 err = -EIO;
1727                 goto out;
1728         }
1729
1730         leaf = path->nodes[0];
1731         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1732 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1733         if (item_size < sizeof(*ei)) {
1734                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1735                                              path, (u64)-1, 0);
1736                 if (ret < 0) {
1737                         err = ret;
1738                         goto out;
1739                 }
1740                 leaf = path->nodes[0];
1741                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1742         }
1743 #endif
1744         BUG_ON(item_size < sizeof(*ei));
1745         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1746         __run_delayed_extent_op(extent_op, leaf, ei);
1747
1748         btrfs_mark_buffer_dirty(leaf);
1749 out:
1750         btrfs_free_path(path);
1751         return err;
1752 }
1753
1754 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1755                                 struct btrfs_root *root,
1756                                 struct btrfs_delayed_ref_node *node,
1757                                 struct btrfs_delayed_extent_op *extent_op,
1758                                 int insert_reserved)
1759 {
1760         int ret = 0;
1761         struct btrfs_delayed_tree_ref *ref;
1762         struct btrfs_key ins;
1763         u64 parent = 0;
1764         u64 ref_root = 0;
1765
1766         ins.objectid = node->bytenr;
1767         ins.offset = node->num_bytes;
1768         ins.type = BTRFS_EXTENT_ITEM_KEY;
1769
1770         ref = btrfs_delayed_node_to_tree_ref(node);
1771         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1772                 parent = ref->parent;
1773         else
1774                 ref_root = ref->root;
1775
1776         BUG_ON(node->ref_mod != 1);
1777         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1778                 BUG_ON(!extent_op || !extent_op->update_flags ||
1779                        !extent_op->update_key);
1780                 ret = alloc_reserved_tree_block(trans, root,
1781                                                 parent, ref_root,
1782                                                 extent_op->flags_to_set,
1783                                                 &extent_op->key,
1784                                                 ref->level, &ins);
1785                 update_reserved_extents(root, ins.objectid, ins.offset, 0);
1786         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1787                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1788                                              node->num_bytes, parent, ref_root,
1789                                              ref->level, 0, 1, extent_op);
1790         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1791                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1792                                           node->num_bytes, parent, ref_root,
1793                                           ref->level, 0, 1, extent_op);
1794         } else {
1795                 BUG();
1796         }
1797         return ret;
1798 }
1799
1800
1801 /* helper function to actually process a single delayed ref entry */
1802 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1803                                struct btrfs_root *root,
1804                                struct btrfs_delayed_ref_node *node,
1805                                struct btrfs_delayed_extent_op *extent_op,
1806                                int insert_reserved)
1807 {
1808         int ret;
1809         if (btrfs_delayed_ref_is_head(node)) {
1810                 struct btrfs_delayed_ref_head *head;
1811                 /*
1812                  * we've hit the end of the chain and we were supposed
1813                  * to insert this extent into the tree.  But it got
1814                  * deleted before we ever needed to insert it, so all
1815                  * we have to do is clean up the accounting
1816                  */
1817                 BUG_ON(extent_op);
1818                 head = btrfs_delayed_node_to_head(node);
1819                 if (insert_reserved) {
1820                         if (head->is_data) {
1821                                 ret = btrfs_del_csums(trans, root,
1822                                                       node->bytenr,
1823                                                       node->num_bytes);
1824                                 BUG_ON(ret);
1825                         }
1826                         btrfs_update_pinned_extents(root, node->bytenr,
1827                                                     node->num_bytes, 1);
1828                         update_reserved_extents(root, node->bytenr,
1829                                                 node->num_bytes, 0);
1830                 }
1831                 mutex_unlock(&head->mutex);
1832                 return 0;
1833         }
1834
1835         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1836             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1837                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
1838                                            insert_reserved);
1839         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1840                  node->type == BTRFS_SHARED_DATA_REF_KEY)
1841                 ret = run_delayed_data_ref(trans, root, node, extent_op,
1842                                            insert_reserved);
1843         else
1844                 BUG();
1845         return ret;
1846 }
1847
1848 static noinline struct btrfs_delayed_ref_node *
1849 select_delayed_ref(struct btrfs_delayed_ref_head *head)
1850 {
1851         struct rb_node *node;
1852         struct btrfs_delayed_ref_node *ref;
1853         int action = BTRFS_ADD_DELAYED_REF;
1854 again:
1855         /*
1856          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
1857          * this prevents the ref count from going down to zero while
1858          * there are still pending delayed refs.
1859          */
1860         node = rb_prev(&head->node.rb_node);
1861         while (1) {
1862                 if (!node)
1863                         break;
1864                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
1865                                 rb_node);
1866                 if (ref->bytenr != head->node.bytenr)
1867                         break;
1868                 if (ref->action == action)
1869                         return ref;
1870                 node = rb_prev(node);
1871         }
1872         if (action == BTRFS_ADD_DELAYED_REF) {
1873                 action = BTRFS_DROP_DELAYED_REF;
1874                 goto again;
1875         }
1876         return NULL;
1877 }
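/*
 * Editor's note: a worked example, not in the original file.  if the
 * pending refs for one bytenr are (+1, -1, -1, +1), select_delayed_ref
 * hands back both BTRFS_ADD_DELAYED_REF entries before either drop, so
 * a ref count of 1 goes 1 -> 2 -> 3 -> 2 -> 1 instead of dipping to
 * zero (and freeing the extent) partway through
 */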
1878
1879 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
1880                                        struct btrfs_root *root,
1881                                        struct list_head *cluster)
1882 {
1883         struct btrfs_delayed_ref_root *delayed_refs;
1884         struct btrfs_delayed_ref_node *ref;
1885         struct btrfs_delayed_ref_head *locked_ref = NULL;
1886         struct btrfs_delayed_extent_op *extent_op;
1887         int ret;
1888         int count = 0;
1889         int must_insert_reserved = 0;
1890
1891         delayed_refs = &trans->transaction->delayed_refs;
1892         while (1) {
1893                 if (!locked_ref) {
1894                         /* pick a new head ref from the cluster list */
1895                         if (list_empty(cluster))
1896                                 break;
1897
1898                         locked_ref = list_entry(cluster->next,
1899                                      struct btrfs_delayed_ref_head, cluster);
1900
1901                         /* grab the lock that says we are going to process
1902                          * all the refs for this head */
1903                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
1904
1905                         /*
1906                          * we may have dropped the spin lock to get the head
1907                          * mutex lock, and that might have given someone else
1908                          * time to free the head.  If that's true, it has been
1909                          * removed from our list and we can move on.
1910                          */
1911                         if (ret == -EAGAIN) {
1912                                 locked_ref = NULL;
1913                                 count++;
1914                                 continue;
1915                         }
1916                 }
1917
1918                 /*
1919                  * record the must insert reserved flag before we
1920                  * drop the spin lock.
1921                  */
1922                 must_insert_reserved = locked_ref->must_insert_reserved;
1923                 locked_ref->must_insert_reserved = 0;
1924
1925                 extent_op = locked_ref->extent_op;
1926                 locked_ref->extent_op = NULL;
1927
1928                 /*
1929                  * locked_ref is the head node, so we have to go one
1930                  * node back for any delayed ref updates
1931                  */
1932                 ref = select_delayed_ref(locked_ref);
1933                 if (!ref) {
1934                         /* All delayed refs have been processed, Go ahead
1935                          * and send the head node to run_one_delayed_ref,
1936                          * so that any accounting fixes can happen
1937                          */
1938                         ref = &locked_ref->node;
1939
1940                         if (extent_op && must_insert_reserved) {
1941                                 kfree(extent_op);
1942                                 extent_op = NULL;
1943                         }
1944
1945                         if (extent_op) {
1946                                 spin_unlock(&delayed_refs->lock);
1947
1948                                 ret = run_delayed_extent_op(trans, root,
1949                                                             ref, extent_op);
1950                                 BUG_ON(ret);
1951                                 kfree(extent_op);
1952
1953                                 cond_resched();
1954                                 spin_lock(&delayed_refs->lock);
1955                                 continue;
1956                         }
1957
1958                         list_del_init(&locked_ref->cluster);
1959                         locked_ref = NULL;
1960                 }
1961
1962                 ref->in_tree = 0;
1963                 rb_erase(&ref->rb_node, &delayed_refs->root);
1964                 delayed_refs->num_entries--;
1965
1966                 spin_unlock(&delayed_refs->lock);
1967
1968                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
1969                                           must_insert_reserved);
1970                 BUG_ON(ret);
1971
1972                 btrfs_put_delayed_ref(ref);
1973                 kfree(extent_op);
1974                 count++;
1975
1976                 cond_resched();
1977                 spin_lock(&delayed_refs->lock);
1978         }
1979         return count;
1980 }
1981
1982 /*
1983  * this starts processing the delayed reference count updates and
1984  * extent insertions we have queued up so far.  count can be
1985  * 0, which means to process everything in the tree at the start
1986  * of the run (but not newly added entries), or it can be some target
1987  * number you'd like to process.
1988  */
1989 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
1990                            struct btrfs_root *root, unsigned long count)
1991 {
1992         struct rb_node *node;
1993         struct btrfs_delayed_ref_root *delayed_refs;
1994         struct btrfs_delayed_ref_node *ref;
1995         struct list_head cluster;
1996         int ret;
1997         int run_all = count == (unsigned long)-1;
1998         int run_most = 0;
1999
2000         if (root == root->fs_info->extent_root)
2001                 root = root->fs_info->tree_root;
2002
2003         delayed_refs = &trans->transaction->delayed_refs;
2004         INIT_LIST_HEAD(&cluster);
2005 again:
2006         spin_lock(&delayed_refs->lock);
2007         if (count == 0) {
2008                 count = delayed_refs->num_entries * 2;
2009                 run_most = 1;
2010         }
2011         while (1) {
2012                 if (!(run_all || run_most) &&
2013                     delayed_refs->num_heads_ready < 64)
2014                         break;
2015
2016                 /*
2017                  * go find something we can process in the rbtree.  We start at
2018                  * the beginning of the tree, and then build a cluster
2019                  * of refs to process starting at the first one we are able to
2020                  * lock
2021                  */
2022                 ret = btrfs_find_ref_cluster(trans, &cluster,
2023                                              delayed_refs->run_delayed_start);
2024                 if (ret)
2025                         break;
2026
2027                 ret = run_clustered_refs(trans, root, &cluster);
2028                 BUG_ON(ret < 0);
2029
2030                 count -= min_t(unsigned long, ret, count);
2031
2032                 if (count == 0)
2033                         break;
2034         }
2035
2036         if (run_all) {
2037                 node = rb_first(&delayed_refs->root);
2038                 if (!node)
2039                         goto out;
2040                 count = (unsigned long)-1;
2041
2042                 while (node) {
2043                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2044                                        rb_node);
2045                         if (btrfs_delayed_ref_is_head(ref)) {
2046                                 struct btrfs_delayed_ref_head *head;
2047
2048                                 head = btrfs_delayed_node_to_head(ref);
2049                                 atomic_inc(&ref->refs);
2050
2051                                 spin_unlock(&delayed_refs->lock);
2052                                 mutex_lock(&head->mutex);
2053                                 mutex_unlock(&head->mutex);
2054
2055                                 btrfs_put_delayed_ref(ref);
2056                                 cond_resched();
2057                                 goto again;
2058                         }
2059                         node = rb_next(node);
2060                 }
2061                 spin_unlock(&delayed_refs->lock);
2062                 schedule_timeout(1);
2063                 goto again;
2064         }
2065 out:
2066         spin_unlock(&delayed_refs->lock);
2067         return 0;
2068 }
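/*
 * Editor's note (illustrative, based on callers in this file): passing
 * (unsigned long)-1, as btrfs_write_dirty_block_groups() does below,
 * drains the tree completely, including heads added while running;
 * passing 0 processes roughly what was queued at the start of the run
 */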
2069
2070 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2071                                 struct btrfs_root *root,
2072                                 u64 bytenr, u64 num_bytes, u64 flags,
2073                                 int is_data)
2074 {
2075         struct btrfs_delayed_extent_op *extent_op;
2076         int ret;
2077
2078         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2079         if (!extent_op)
2080                 return -ENOMEM;
2081
2082         extent_op->flags_to_set = flags;
2083         extent_op->update_flags = 1;
2084         extent_op->update_key = 0;
2085         extent_op->is_data = is_data ? 1 : 0;
2086
2087         ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2088         if (ret)
2089                 kfree(extent_op);
2090         return ret;
2091 }
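/*
 * note: on success the delayed-ref code owns and eventually frees
 * extent_op (see run_clustered_refs); it is only freed here on failure
 */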
2092
2093 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2094                                       struct btrfs_root *root,
2095                                       struct btrfs_path *path,
2096                                       u64 objectid, u64 offset, u64 bytenr)
2097 {
2098         struct btrfs_delayed_ref_head *head;
2099         struct btrfs_delayed_ref_node *ref;
2100         struct btrfs_delayed_data_ref *data_ref;
2101         struct btrfs_delayed_ref_root *delayed_refs;
2102         struct rb_node *node;
2103         int ret;
2104
2105         ret = -ENOENT;
2106         delayed_refs = &trans->transaction->delayed_refs;
2107         spin_lock(&delayed_refs->lock);
2108         head = btrfs_find_delayed_ref_head(trans, bytenr);
2109         if (!head)
2110                 goto out;
2111
2112         if (!mutex_trylock(&head->mutex)) {
2113                 atomic_inc(&head->node.refs);
2114                 spin_unlock(&delayed_refs->lock);
2115
2116                 btrfs_release_path(root->fs_info->extent_root, path);
2117
2118                 mutex_lock(&head->mutex);
2119                 mutex_unlock(&head->mutex);
2120                 btrfs_put_delayed_ref(&head->node);
2121                 return -EAGAIN;
2122         }
2123
2124         node = rb_prev(&head->node.rb_node);
2125         if (!node)
2126                 goto out_unlock;
2127
2128         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2129
2130         if (ref->bytenr != bytenr)
2131                 goto out_unlock;
2132
2133         ret = 1;
2134         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2135                 goto out_unlock;
2136
2137         data_ref = btrfs_delayed_node_to_data_ref(ref);
2138
2139         node = rb_prev(node);
2140         if (node) {
2141                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2142                 if (ref->bytenr == bytenr)
2143                         goto out_unlock;
2144         }
2145
2146         if (data_ref->root != root->root_key.objectid ||
2147             data_ref->objectid != objectid || data_ref->offset != offset)
2148                 goto out_unlock;
2149
2150         ret = 0;
2151 out_unlock:
2152         mutex_unlock(&head->mutex);
2153 out:
2154         spin_unlock(&delayed_refs->lock);
2155         return ret;
2156 }
2157
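/*
 * check the committed extent tree for other references to a data extent.
 * returns 0 when the extent was created after the last snapshot of this
 * root and carries exactly one inline data ref matching (root, objectid,
 * offset); returns 1 when it may be cross referenced, and -ENOENT when
 * no extent item for bytenr is found
 */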
2158 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2159                                         struct btrfs_root *root,
2160                                         struct btrfs_path *path,
2161                                         u64 objectid, u64 offset, u64 bytenr)
2162 {
2163         struct btrfs_root *extent_root = root->fs_info->extent_root;
2164         struct extent_buffer *leaf;
2165         struct btrfs_extent_data_ref *ref;
2166         struct btrfs_extent_inline_ref *iref;
2167         struct btrfs_extent_item *ei;
2168         struct btrfs_key key;
2169         u32 item_size;
2170         int ret;
2171
2172         key.objectid = bytenr;
2173         key.offset = (u64)-1;
2174         key.type = BTRFS_EXTENT_ITEM_KEY;
2175
2176         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2177         if (ret < 0)
2178                 goto out;
2179         BUG_ON(ret == 0);
2180
2181         ret = -ENOENT;
2182         if (path->slots[0] == 0)
2183                 goto out;
2184
2185         path->slots[0]--;
2186         leaf = path->nodes[0];
2187         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2188
2189         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2190                 goto out;
2191
2192         ret = 1;
2193         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2194 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2195         if (item_size < sizeof(*ei)) {
2196                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2197                 goto out;
2198         }
2199 #endif
2200         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2201
2202         if (item_size != sizeof(*ei) +
2203             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2204                 goto out;
2205
2206         if (btrfs_extent_generation(leaf, ei) <=
2207             btrfs_root_last_snapshot(&root->root_item))
2208                 goto out;
2209
2210         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2211         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2212             BTRFS_EXTENT_DATA_REF_KEY)
2213                 goto out;
2214
2215         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2216         if (btrfs_extent_refs(leaf, ei) !=
2217             btrfs_extent_data_ref_count(leaf, ref) ||
2218             btrfs_extent_data_ref_root(leaf, ref) !=
2219             root->root_key.objectid ||
2220             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2221             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2222                 goto out;
2223
2224         ret = 0;
2225 out:
2226         return ret;
2227 }
2228
2229 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2230                           struct btrfs_root *root,
2231                           u64 objectid, u64 offset, u64 bytenr)
2232 {
2233         struct btrfs_path *path;
2234         int ret;
2235         int ret2;
2236
2237         path = btrfs_alloc_path();
2238         if (!path)
2239                 return -ENOMEM;
2240
2241         do {
2242                 ret = check_committed_ref(trans, root, path, objectid,
2243                                           offset, bytenr);
2244                 if (ret && ret != -ENOENT)
2245                         goto out;
2246
2247                 ret2 = check_delayed_ref(trans, root, path, objectid,
2248                                          offset, bytenr);
2249         } while (ret2 == -EAGAIN);
2250
2251         if (ret2 && ret2 != -ENOENT) {
2252                 ret = ret2;
2253                 goto out;
2254         }
2255
2256         if (ret != -ENOENT || ret2 != -ENOENT)
2257                 ret = 0;
2258 out:
2259         btrfs_free_path(path);
2260         return ret;
2261 }
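/*
 * the delayed check is retried on -EAGAIN because check_delayed_ref()
 * had to drop the path and wait on a contended delayed ref head mutex;
 * the committed check must then be redone against a fresh path
 */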
2262
2263 #if 0
2264 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2265                     struct extent_buffer *buf, u32 nr_extents)
2266 {
2267         struct btrfs_key key;
2268         struct btrfs_file_extent_item *fi;
2269         u64 root_gen;
2270         u32 nritems;
2271         int i;
2272         int level;
2273         int ret = 0;
2274         int shared = 0;
2275
2276         if (!root->ref_cows)
2277                 return 0;
2278
2279         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2280                 shared = 0;
2281                 root_gen = root->root_key.offset;
2282         } else {
2283                 shared = 1;
2284                 root_gen = trans->transid - 1;
2285         }
2286
2287         level = btrfs_header_level(buf);
2288         nritems = btrfs_header_nritems(buf);
2289
2290         if (level == 0) {
2291                 struct btrfs_leaf_ref *ref;
2292                 struct btrfs_extent_info *info;
2293
2294                 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2295                 if (!ref) {
2296                         ret = -ENOMEM;
2297                         goto out;
2298                 }
2299
2300                 ref->root_gen = root_gen;
2301                 ref->bytenr = buf->start;
2302                 ref->owner = btrfs_header_owner(buf);
2303                 ref->generation = btrfs_header_generation(buf);
2304                 ref->nritems = nr_extents;
2305                 info = ref->extents;
2306
2307                 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2308                         u64 disk_bytenr;
2309                         btrfs_item_key_to_cpu(buf, &key, i);
2310                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2311                                 continue;
2312                         fi = btrfs_item_ptr(buf, i,
2313                                             struct btrfs_file_extent_item);
2314                         if (btrfs_file_extent_type(buf, fi) ==
2315                             BTRFS_FILE_EXTENT_INLINE)
2316                                 continue;
2317                         disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2318                         if (disk_bytenr == 0)
2319                                 continue;
2320
2321                         info->bytenr = disk_bytenr;
2322                         info->num_bytes =
2323                                 btrfs_file_extent_disk_num_bytes(buf, fi);
2324                         info->objectid = key.objectid;
2325                         info->offset = key.offset;
2326                         info++;
2327                 }
2328
2329                 ret = btrfs_add_leaf_ref(root, ref, shared);
2330                 if (ret == -EEXIST && shared) {
2331                         struct btrfs_leaf_ref *old;
2332                         old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2333                         BUG_ON(!old);
2334                         btrfs_remove_leaf_ref(root, old);
2335                         btrfs_free_leaf_ref(root, old);
2336                         ret = btrfs_add_leaf_ref(root, ref, shared);
2337                 }
2338                 WARN_ON(ret);
2339                 btrfs_free_leaf_ref(root, ref);
2340         }
2341 out:
2342         return ret;
2343 }
2344
2345 /* when a block goes through cow, we update the reference counts of
2346  * everything that block points to.  The internal pointers of the block
2347  * can be in just about any order, and it is likely to have clusters of
2348  * things that are close together and clusters of things that are not.
2349  *
2350  * To help reduce the seeks that come with updating all of these reference
2351  * counts, sort them by byte number before actual updates are done.
2352  *
2353  * struct refsort is used to match byte number to slot in the btree block.
2354  * we sort based on the byte number and then use the slot to actually
2355  * find the item.
2356  *
2357  * struct refsort is smaller than struct btrfs_item and smaller than
2358  * struct btrfs_key_ptr.  Since we're currently limited to the page size
2359  * for a btree block, there's no way for a kmalloc of refsorts for a
2360  * single node to be bigger than a page.
2361  */
2362 struct refsort {
2363         u64 bytenr;
2364         u32 slot;
2365 };
2366
2367 /*
2368  * for passing into sort()
2369  */
2370 static int refsort_cmp(const void *a_void, const void *b_void)
2371 {
2372         const struct refsort *a = a_void;
2373         const struct refsort *b = b_void;
2374
2375         if (a->bytenr < b->bytenr)
2376                 return -1;
2377         if (a->bytenr > b->bytenr)
2378                 return 1;
2379         return 0;
2380 }
2381 #endif
2382
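/*
 * walk all pointers in a cow'd buffer and inc or dec the refs on
 * everything it points to: file extent items in a leaf, child blocks in
 * a node.  full_backref selects shared (parent = buf->start) backrefs
 * instead of keyed ones
 */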
2383 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2384                            struct btrfs_root *root,
2385                            struct extent_buffer *buf,
2386                            int full_backref, int inc)
2387 {
2388         u64 bytenr;
2389         u64 num_bytes;
2390         u64 parent;
2391         u64 ref_root;
2392         u32 nritems;
2393         struct btrfs_key key;
2394         struct btrfs_file_extent_item *fi;
2395         int i;
2396         int level;
2397         int ret = 0;
2398         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2399                             u64, u64, u64, u64, u64, u64);
2400
2401         ref_root = btrfs_header_owner(buf);
2402         nritems = btrfs_header_nritems(buf);
2403         level = btrfs_header_level(buf);
2404
2405         if (!root->ref_cows && level == 0)
2406                 return 0;
2407
2408         if (inc)
2409                 process_func = btrfs_inc_extent_ref;
2410         else
2411                 process_func = btrfs_free_extent;
2412
2413         if (full_backref)
2414                 parent = buf->start;
2415         else
2416                 parent = 0;
2417
2418         for (i = 0; i < nritems; i++) {
2419                 if (level == 0) {
2420                         btrfs_item_key_to_cpu(buf, &key, i);
2421                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2422                                 continue;
2423                         fi = btrfs_item_ptr(buf, i,
2424                                             struct btrfs_file_extent_item);
2425                         if (btrfs_file_extent_type(buf, fi) ==
2426                             BTRFS_FILE_EXTENT_INLINE)
2427                                 continue;
2428                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2429                         if (bytenr == 0)
2430                                 continue;
2431
2432                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2433                         key.offset -= btrfs_file_extent_offset(buf, fi);
2434                         ret = process_func(trans, root, bytenr, num_bytes,
2435                                            parent, ref_root, key.objectid,
2436                                            key.offset);
2437                         if (ret)
2438                                 goto fail;
2439                 } else {
2440                         bytenr = btrfs_node_blockptr(buf, i);
2441                         num_bytes = btrfs_level_size(root, level - 1);
2442                         ret = process_func(trans, root, bytenr, num_bytes,
2443                                            parent, ref_root, level - 1, 0);
2444                         if (ret)
2445                                 goto fail;
2446                 }
2447         }
2448         return 0;
2449 fail:
2450         BUG();
2451         return ret;
2452 }
2453
2454 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2455                   struct extent_buffer *buf, int full_backref)
2456 {
2457         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2458 }
2459
2460 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2461                   struct extent_buffer *buf, int full_backref)
2462 {
2463         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2464 }
2465
2466 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2467                                  struct btrfs_root *root,
2468                                  struct btrfs_path *path,
2469                                  struct btrfs_block_group_cache *cache)
2470 {
2471         int ret;
2472         struct btrfs_root *extent_root = root->fs_info->extent_root;
2473         unsigned long bi;
2474         struct extent_buffer *leaf;
2475
2476         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2477         if (ret < 0)
2478                 goto fail;
2479         BUG_ON(ret);
2480
2481         leaf = path->nodes[0];
2482         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2483         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2484         btrfs_mark_buffer_dirty(leaf);
2485         btrfs_release_path(extent_root, path);
2486 fail:
2487         if (ret)
2488                 return ret;
2489         return 0;
2490
2491 }
2492
2493 static struct btrfs_block_group_cache *
2494 next_block_group(struct btrfs_root *root,
2495                  struct btrfs_block_group_cache *cache)
2496 {
2497         struct rb_node *node;
2498         spin_lock(&root->fs_info->block_group_cache_lock);
2499         node = rb_next(&cache->cache_node);
2500         btrfs_put_block_group(cache);
2501         if (node) {
2502                 cache = rb_entry(node, struct btrfs_block_group_cache,
2503                                  cache_node);
2504                 atomic_inc(&cache->count);
2505         } else
2506                 cache = NULL;
2507         spin_unlock(&root->fs_info->block_group_cache_lock);
2508         return cache;
2509 }
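/*
 * note: next_block_group() consumes the caller's reference on 'cache'
 * and returns the next group (if any) with a new reference held
 */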
2510
2511 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2512                                    struct btrfs_root *root)
2513 {
2514         struct btrfs_block_group_cache *cache;
2515         int err = 0;
2516         struct btrfs_path *path;
2517         u64 last = 0;
2518
2519         path = btrfs_alloc_path();
2520         if (!path)
2521                 return -ENOMEM;
2522
2523         while (1) {
2524                 if (last == 0) {
2525                         err = btrfs_run_delayed_refs(trans, root,
2526                                                      (unsigned long)-1);
2527                         BUG_ON(err);
2528                 }
2529
2530                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2531                 while (cache) {
2532                         if (cache->dirty)
2533                                 break;
2534                         cache = next_block_group(root, cache);
2535                 }
2536                 if (!cache) {
2537                         if (last == 0)
2538                                 break;
2539                         last = 0;
2540                         continue;
2541                 }
2542
2543                 cache->dirty = 0;
2544                 last = cache->key.objectid + cache->key.offset;
2545
2546                 err = write_one_cache_group(trans, root, path, cache);
2547                 BUG_ON(err);
2548                 btrfs_put_block_group(cache);
2549         }
2550
2551         btrfs_free_path(path);
2552         return 0;
2553 }
2554
2555 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2556 {
2557         struct btrfs_block_group_cache *block_group;
2558         int readonly = 0;
2559
2560         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2561         if (!block_group || block_group->ro)
2562                 readonly = 1;
2563         if (block_group)
2564                 btrfs_put_block_group(block_group);
2565         return readonly;
2566 }
2567
2568 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2569                              u64 total_bytes, u64 bytes_used,
2570                              struct btrfs_space_info **space_info)
2571 {
2572         struct btrfs_space_info *found;
2573
2574         found = __find_space_info(info, flags);
2575         if (found) {
2576                 spin_lock(&found->lock);
2577                 found->total_bytes += total_bytes;
2578                 found->bytes_used += bytes_used;
2579                 found->full = 0;
2580                 spin_unlock(&found->lock);
2581                 *space_info = found;
2582                 return 0;
2583         }
2584         found = kzalloc(sizeof(*found), GFP_NOFS);
2585         if (!found)
2586                 return -ENOMEM;
2587
2588         INIT_LIST_HEAD(&found->block_groups);
2589         init_rwsem(&found->groups_sem);
2590         spin_lock_init(&found->lock);
2591         found->flags = flags;
2592         found->total_bytes = total_bytes;
2593         found->bytes_used = bytes_used;
2594         found->bytes_pinned = 0;
2595         found->bytes_reserved = 0;
2596         found->bytes_readonly = 0;
2597         found->bytes_delalloc = 0;
2598         found->full = 0;
2599         found->force_alloc = 0;
2600         *space_info = found;
2601         list_add_rcu(&found->list, &info->space_info);
2602         atomic_set(&found->caching_threads, 0);
2603         return 0;
2604 }
2605
2606 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2607 {
2608         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2609                                    BTRFS_BLOCK_GROUP_RAID1 |
2610                                    BTRFS_BLOCK_GROUP_RAID10 |
2611                                    BTRFS_BLOCK_GROUP_DUP);
2612         if (extra_flags) {
2613                 if (flags & BTRFS_BLOCK_GROUP_DATA)
2614                         fs_info->avail_data_alloc_bits |= extra_flags;
2615                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2616                         fs_info->avail_metadata_alloc_bits |= extra_flags;
2617                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2618                         fs_info->avail_system_alloc_bits |= extra_flags;
2619         }
2620 }
2621
2622 static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
2623 {
2624         spin_lock(&cache->space_info->lock);
2625         spin_lock(&cache->lock);
2626         if (!cache->ro) {
2627                 cache->space_info->bytes_readonly += cache->key.offset -
2628                                         btrfs_block_group_used(&cache->item);
2629                 cache->ro = 1;
2630         }
2631         spin_unlock(&cache->lock);
2632         spin_unlock(&cache->space_info->lock);
2633 }
2634
2635 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2636 {
2637         u64 num_devices = root->fs_info->fs_devices->rw_devices;
2638
2639         if (num_devices == 1)
2640                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2641         if (num_devices < 4)
2642                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2643
2644         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2645             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2646                       BTRFS_BLOCK_GROUP_RAID10))) {
2647                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
2648         }
2649
2650         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
2651             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
2652                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
2653         }
2654
2655         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2656             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
2657              (flags & BTRFS_BLOCK_GROUP_RAID10) |
2658              (flags & BTRFS_BLOCK_GROUP_DUP)))
2659                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
2660         return flags;
2661 }
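/*
 * Editor's note: a worked example, not in the original file.  with two
 * rw devices and flags = RAID10 | RAID1 | DUP, num_devices < 4 clears
 * RAID10, the DUP+RAID1 rule then clears DUP, and RAID1 survives; on a
 * single device the same request collapses all the way down to DUP
 */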
2662
2663 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
2664 {
2665         struct btrfs_fs_info *info = root->fs_info;
2666         u64 alloc_profile;
2667
2668         if (data) {
2669                 alloc_profile = info->avail_data_alloc_bits &
2670                         info->data_alloc_profile;
2671                 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2672         } else if (root == root->fs_info->chunk_root) {
2673                 alloc_profile = info->avail_system_alloc_bits &
2674                         info->system_alloc_profile;
2675                 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2676         } else {
2677                 alloc_profile = info->avail_metadata_alloc_bits &
2678                         info->metadata_alloc_profile;
2679                 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2680         }
2681
2682         return btrfs_reduce_alloc_profile(root, data);
2683 }
2684
2685 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2686 {
2687         u64 alloc_target;
2688
2689         alloc_target = btrfs_get_alloc_profile(root, 1);
2690         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2691                                                        alloc_target);
2692 }
2693
2694 /*
2695  * for now this just makes sure metadata usage stays below 80% of the space
2696  * we have (95%, i.e. at least 5% free, once the space info is full).
2697  */
2698 int btrfs_check_metadata_free_space(struct btrfs_root *root)
2699 {
2700         struct btrfs_fs_info *info = root->fs_info;
2701         struct btrfs_space_info *meta_sinfo;
2702         u64 alloc_target, thresh;
2703         int committed = 0, ret;
2704
2705         /* get the space info for where the metadata will live */
2706         alloc_target = btrfs_get_alloc_profile(root, 0);
2707         meta_sinfo = __find_space_info(info, alloc_target);
2708
2709 again:
2710         spin_lock(&meta_sinfo->lock);
2711         if (!meta_sinfo->full)
2712                 thresh = meta_sinfo->total_bytes * 80;
2713         else
2714                 thresh = meta_sinfo->total_bytes * 95;
2715
2716         do_div(thresh, 100);
2717
2718         if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2719             meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
2720                 struct btrfs_trans_handle *trans;
2721                 if (!meta_sinfo->full) {
2722                         meta_sinfo->force_alloc = 1;
2723                         spin_unlock(&meta_sinfo->lock);
2724
2725                         trans = btrfs_start_transaction(root, 1);
2726                         if (!trans)
2727                                 return -ENOMEM;
2728
2729                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2730                                              2 * 1024 * 1024, alloc_target, 0);
2731                         btrfs_end_transaction(trans, root);
2732                         goto again;
2733                 }
2734                 spin_unlock(&meta_sinfo->lock);
2735
2736                 if (!committed) {
2737                         committed = 1;
2738                         trans = btrfs_join_transaction(root, 1);
2739                         if (!trans)
2740                                 return -ENOMEM;
2741                         ret = btrfs_commit_transaction(trans, root);
2742                         if (ret)
2743                                 return ret;
2744                         goto again;
2745                 }
2746                 return -ENOSPC;
2747         }
2748         spin_unlock(&meta_sinfo->lock);
2749
2750         return 0;
2751 }
2752
2753 /*
2754  * This will check the space that the inode allocates from to make sure we have
2755  * enough space for bytes.
2756  */
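/*
 * Typical usage is reserve-then-undo-on-failure; a minimal sketch (the
 * copy_data() step is hypothetical):
 *
 *	ret = btrfs_check_data_free_space(root, inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	ret = copy_data(inode, num_bytes);
 *	if (ret)
 *		btrfs_free_reserved_data_space(root, inode, num_bytes);
 */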
2757 int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
2758                                 u64 bytes)
2759 {
2760         struct btrfs_space_info *data_sinfo;
2761         int ret = 0, committed = 0;
2762
2763         /* make sure bytes are sectorsize aligned */
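        /* e.g. with a 4K sectorsize, a 5000 byte request rounds up to 8192 */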
2764         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2765
2766         data_sinfo = BTRFS_I(inode)->space_info;
2767 again:
2768         /* make sure we have enough space to handle the data first */
2769         spin_lock(&data_sinfo->lock);
2770         if (data_sinfo->total_bytes - data_sinfo->bytes_used -
2771             data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
2772             data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
2773             data_sinfo->bytes_may_use < bytes) {
2774                 struct btrfs_trans_handle *trans;
2775
2776                 /*
2777                  * if we don't have enough free bytes in this space then we need
2778                  * to alloc a new chunk.
2779                  */
2780                 if (!data_sinfo->full) {
2781                         u64 alloc_target;
2782
2783                         data_sinfo->force_alloc = 1;
2784                         spin_unlock(&data_sinfo->lock);
2785
2786                         alloc_target = btrfs_get_alloc_profile(root, 1);
2787                         trans = btrfs_start_transaction(root, 1);
2788                         if (!trans)
2789                                 return -ENOMEM;
2790
2791                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2792                                              bytes + 2 * 1024 * 1024,
2793                                              alloc_target, 0);
2794                         btrfs_end_transaction(trans, root);
2795                         if (ret)
2796                                 return ret;
2797                         goto again;
2798                 }
2799                 spin_unlock(&data_sinfo->lock);
2800
2801                 /* commit the current transaction and try again */
2802                 if (!committed) {
2803                         committed = 1;
2804                         trans = btrfs_join_transaction(root, 1);
2805                         if (!trans)
2806                                 return -ENOMEM;
2807                         ret = btrfs_commit_transaction(trans, root);
2808                         if (ret)
2809                                 return ret;
2810                         goto again;
2811                 }
2812
2813                 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
2814                        ", %llu bytes_used, %llu bytes_reserved, "
2815                        "%llu bytes_pinned, %llu bytes_readonly, %llu bytes_may_use, "
2816                        "%llu total\n", (unsigned long long)bytes,
2817                        (unsigned long long)data_sinfo->bytes_delalloc,
2818                        (unsigned long long)data_sinfo->bytes_used,
2819                        (unsigned long long)data_sinfo->bytes_reserved,
2820                        (unsigned long long)data_sinfo->bytes_pinned,
2821                        (unsigned long long)data_sinfo->bytes_readonly,
2822                        (unsigned long long)data_sinfo->bytes_may_use,
2823                        (unsigned long long)data_sinfo->total_bytes);
2824                 return -ENOSPC;
2825         }
2826         data_sinfo->bytes_may_use += bytes;
2827         BTRFS_I(inode)->reserved_bytes += bytes;
2828         spin_unlock(&data_sinfo->lock);
2829
2830         return btrfs_check_metadata_free_space(root);
2831 }
2832
2833 /*
2834  * if there was an error for whatever reason after calling
2835  * btrfs_check_data_free_space, call this so we can clean up the counters.
2836  */
2837 void btrfs_free_reserved_data_space(struct btrfs_root *root,
2838                                     struct inode *inode, u64 bytes)
2839 {
2840         struct btrfs_space_info *data_sinfo;
2841
2842         /* make sure bytes are sectorsize aligned */
2843         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2844
2845         data_sinfo = BTRFS_I(inode)->space_info;
2846         spin_lock(&data_sinfo->lock);
2847         data_sinfo->bytes_may_use -= bytes;
2848         BTRFS_I(inode)->reserved_bytes -= bytes;
2849         spin_unlock(&data_sinfo->lock);
2850 }
2851
2852 /* called when we are adding a delalloc extent to the inode's io_tree */
2853 void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
2854                                   u64 bytes)
2855 {
2856         struct btrfs_space_info *data_sinfo;
2857
2858         /* get the space info for where this inode will be storing its data */
2859         data_sinfo = BTRFS_I(inode)->space_info;
2860
2861         /* make sure we have enough space to handle the data first */
2862         spin_lock(&data_sinfo->lock);
2863         data_sinfo->bytes_delalloc += bytes;
2864
2865         /*
2866          * we are adding a delalloc extent without calling
2867          * btrfs_check_data_free_space first.  This happens on a weird
2868          * writepage condition, but it shouldn't hurt our accounting.
2869          */
2870         if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
2871                 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
2872                 BTRFS_I(inode)->reserved_bytes = 0;
2873         } else {
2874                 data_sinfo->bytes_may_use -= bytes;
2875                 BTRFS_I(inode)->reserved_bytes -= bytes;
2876         }
2877
2878         spin_unlock(&data_sinfo->lock);
2879 }
2880
2881 /* called when we are clearing a delalloc extent from the inode's io_tree */
2882 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
2883                               u64 bytes)
2884 {
2885         struct btrfs_space_info *info;
2886
2887         info = BTRFS_I(inode)->space_info;
2888
2889         spin_lock(&info->lock);
2890         info->bytes_delalloc -= bytes;
2891         spin_unlock(&info->lock);
2892 }
2893
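/*
 * Set force_alloc on every metadata space_info so the next
 * do_chunk_alloc() against them allocates a chunk even below the usual
 * fullness threshold.
 */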
2894 static void force_metadata_allocation(struct btrfs_fs_info *info)
2895 {
2896         struct list_head *head = &info->space_info;
2897         struct btrfs_space_info *found;
2898
2899         rcu_read_lock();
2900         list_for_each_entry_rcu(found, head, list) {
2901                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2902                         found->force_alloc = 1;
2903         }
2904         rcu_read_unlock();
2905 }
2906
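/*
 * Allocate a new chunk for @flags if the space is close to full or
 * allocation is being forced.  Every ->metadata_ratio data chunk
 * allocations, a metadata chunk is forced as well.  Callers typically
 * pass a couple MB of slop, as in the calls elsewhere in this file:
 *
 *	do_chunk_alloc(trans, root->fs_info->extent_root,
 *		       2 * 1024 * 1024, alloc_target, 0);
 */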
2907 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
2908                           struct btrfs_root *extent_root, u64 alloc_bytes,
2909                           u64 flags, int force)
2910 {
2911         struct btrfs_space_info *space_info;
2912         struct btrfs_fs_info *fs_info = extent_root->fs_info;
2913         u64 thresh;
2914         int ret = 0;
2915
2916         mutex_lock(&fs_info->chunk_mutex);
2917
2918         flags = btrfs_reduce_alloc_profile(extent_root, flags);
2919
2920         space_info = __find_space_info(extent_root->fs_info, flags);
2921         if (!space_info) {
2922                 ret = update_space_info(extent_root->fs_info, flags,
2923                                         0, 0, &space_info);
2924                 BUG_ON(ret);
2925         }
2926         BUG_ON(!space_info);
2927
2928         spin_lock(&space_info->lock);
2929         if (space_info->force_alloc) {
2930                 force = 1;
2931                 space_info->force_alloc = 0;
2932         }
2933         if (space_info->full) {
2934                 spin_unlock(&space_info->lock);
2935                 goto out;
2936         }
2937
2938         thresh = space_info->total_bytes - space_info->bytes_readonly;
2939         thresh = div_factor(thresh, 6);
2940         if (!force &&
2941            (space_info->bytes_used + space_info->bytes_pinned +
2942             space_info->bytes_reserved + alloc_bytes) < thresh) {
2943                 spin_unlock(&space_info->lock);
2944                 goto out;
2945         }
2946         spin_unlock(&space_info->lock);
2947
2948         /*
2949          * if we're doing a data chunk, go ahead and make sure that
2950          * we keep a reasonable number of metadata chunks allocated in the
2951          * FS as well.
2952          */
2953         if (flags & BTRFS_BLOCK_GROUP_DATA) {
2954                 fs_info->data_chunk_allocations++;
2955                 if (!(fs_info->data_chunk_allocations %
2956                       fs_info->metadata_ratio))
2957                         force_metadata_allocation(fs_info);
2958         }
2959
2960         ret = btrfs_alloc_chunk(trans, extent_root, flags);
2961         if (ret)
2962                 space_info->full = 1;
2963 out:
2964         mutex_unlock(&extent_root->fs_info->chunk_mutex);
2965         return ret;
2966 }
2967
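/*
 * Adjust the used-byte accounting for the block groups spanning
 * [bytenr, bytenr + num_bytes): the superblock and root item totals,
 * the space_info counters and the block group items themselves.  On a
 * free with @mark_free set, the range is also discarded and returned
 * to the free space cache.
 */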
2968 static int update_block_group(struct btrfs_trans_handle *trans,
2969                               struct btrfs_root *root,
2970                               u64 bytenr, u64 num_bytes, int alloc,
2971                               int mark_free)
2972 {
2973         struct btrfs_block_group_cache *cache;
2974         struct btrfs_fs_info *info = root->fs_info;
2975         u64 total = num_bytes;
2976         u64 old_val;
2977         u64 byte_in_group;
2978
2979         /* block accounting for super block */
2980         spin_lock(&info->delalloc_lock);
2981         old_val = btrfs_super_bytes_used(&info->super_copy);
2982         if (alloc)
2983                 old_val += num_bytes;
2984         else
2985                 old_val -= num_bytes;
2986         btrfs_set_super_bytes_used(&info->super_copy, old_val);
2987
2988         /* block accounting for root item */
2989         old_val = btrfs_root_used(&root->root_item);
2990         if (alloc)
2991                 old_val += num_bytes;
2992         else
2993                 old_val -= num_bytes;
2994         btrfs_set_root_used(&root->root_item, old_val);
2995         spin_unlock(&info->delalloc_lock);
2996
2997         while (total) {
2998                 cache = btrfs_lookup_block_group(info, bytenr);
2999                 if (!cache)
3000                         return -ENOENT;
3001                 byte_in_group = bytenr - cache->key.objectid;
3002                 WARN_ON(byte_in_group > cache->key.offset);
3003
3004                 spin_lock(&cache->space_info->lock);
3005                 spin_lock(&cache->lock);
3006                 cache->dirty = 1;
3007                 old_val = btrfs_block_group_used(&cache->item);
3008                 num_bytes = min(total, cache->key.offset - byte_in_group);
3009                 if (alloc) {
3010                         old_val += num_bytes;
3011                         cache->space_info->bytes_used += num_bytes;
3012                         if (cache->ro)
3013                                 cache->space_info->bytes_readonly -= num_bytes;
3014                         btrfs_set_block_group_used(&cache->item, old_val);
3015                         spin_unlock(&cache->lock);
3016                         spin_unlock(&cache->space_info->lock);
3017                 } else {
3018                         old_val -= num_bytes;
3019                         cache->space_info->bytes_used -= num_bytes;
3020                         if (cache->ro)
3021                                 cache->space_info->bytes_readonly += num_bytes;
3022                         btrfs_set_block_group_used(&cache->item, old_val);
3023                         spin_unlock(&cache->lock);
3024                         spin_unlock(&cache->space_info->lock);
3025                         if (mark_free) {
3026                                 int ret;
3027
3028                                 ret = btrfs_discard_extent(root, bytenr,
3029                                                            num_bytes);
3030                                 WARN_ON(ret);
3031
3032                                 ret = btrfs_add_free_space(cache, bytenr,
3033                                                            num_bytes);
3034                                 WARN_ON(ret);
3035                         }
3036                 }
3037                 btrfs_put_block_group(cache);
3038                 total -= num_bytes;
3039                 bytenr += num_bytes;
3040         }
3041         return 0;
3042 }
3043
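/*
 * Return the logical start of the first block group at or after
 * @search_start, or 0 if no block group is found.
 */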
3044 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
3045 {
3046         struct btrfs_block_group_cache *cache;
3047         u64 bytenr;
3048
3049         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
3050         if (!cache)
3051                 return 0;
3052
3053         bytenr = cache->key.objectid;
3054         btrfs_put_block_group(cache);
3055
3056         return bytenr;
3057 }
3058
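/*
 * Pin or unpin [bytenr, bytenr + num) and keep the per-block-group and
 * space_info pinned counters in sync.  An extent is only unpinned once
 * its block group's free space has finished caching; otherwise caching
 * is kicked off and the extent stays pinned until the next pass.
 */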
3059 int btrfs_update_pinned_extents(struct btrfs_root *root,
3060                                 u64 bytenr, u64 num, int pin)
3061 {
3062         u64 len;
3063         struct btrfs_block_group_cache *cache;
3064         struct btrfs_fs_info *fs_info = root->fs_info;
3065
3066         if (pin)
3067                 set_extent_dirty(&fs_info->pinned_extents,
3068                                 bytenr, bytenr + num - 1, GFP_NOFS);
3069
3070         while (num > 0) {
3071                 cache = btrfs_lookup_block_group(fs_info, bytenr);
3072                 BUG_ON(!cache);
3073                 len = min(num, cache->key.offset -
3074                           (bytenr - cache->key.objectid));
3075                 if (pin) {
3076                         spin_lock(&cache->space_info->lock);
3077                         spin_lock(&cache->lock);
3078                         cache->pinned += len;
3079                         cache->space_info->bytes_pinned += len;
3080                         spin_unlock(&cache->lock);
3081                         spin_unlock(&cache->space_info->lock);
3082                         fs_info->total_pinned += len;
3083                 } else {
3084                         int unpin = 0;
3085
3086                         /*
3087                          * in order to not race with the block group caching, we
3088                          * only want to unpin the extent if we are cached.  If
3089                          * we aren't cached, we want to start async caching this
3090                          * block group so we can free the extent the next time
3091                          * around.
3092                          */
3093                         spin_lock(&cache->space_info->lock);
3094                         spin_lock(&cache->lock);
3095                         unpin = (cache->cached == BTRFS_CACHE_FINISHED);
3096                         if (likely(unpin)) {
3097                                 cache->pinned -= len;
3098                                 cache->space_info->bytes_pinned -= len;
3099                                 fs_info->total_pinned -= len;
3100                         }
3101                         spin_unlock(&cache->lock);
3102                         spin_unlock(&cache->space_info->lock);
3103
3104                         if (likely(unpin))
3105                                 clear_extent_dirty(&fs_info->pinned_extents,
3106                                                    bytenr, bytenr + len - 1,
3107                                                    GFP_NOFS);
3108                         else
3109                                 cache_block_group(cache);
3110
3111                         if (unpin)
3112                                 btrfs_add_free_space(cache, bytenr, len);
3113                 }
3114                 btrfs_put_block_group(cache);
3115                 bytenr += len;
3116                 num -= len;
3117         }
3118         return 0;
3119 }
3120
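/*
 * Add [bytenr, bytenr + num) to, or remove it from, the reserved
 * counters of the block groups it spans, depending on @reserve.
 */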
3121 static int update_reserved_extents(struct btrfs_root *root,
3122                                    u64 bytenr, u64 num, int reserve)
3123 {
3124         u64 len;
3125         struct btrfs_block_group_cache *cache;
3126         struct btrfs_fs_info *fs_info = root->fs_info;
3127
3128         while (num > 0) {
3129                 cache = btrfs_lookup_block_group(fs_info, bytenr);
3130                 BUG_ON(!cache);
3131                 len = min(num, cache->key.offset -
3132                           (bytenr - cache->key.objectid));
3133
3134                 spin_lock(&cache->space_info->lock);
3135                 spin_lock(&cache->lock);
3136                 if (reserve) {
3137                         cache->reserved += len;
3138                         cache->space_info->bytes_reserved += len;
3139                 } else {
3140                         cache->reserved -= len;
3141                         cache->space_info->bytes_reserved -= len;
3142                 }
3143                 spin_unlock(&cache->lock);
3144                 spin_unlock(&cache->space_info->lock);
3145                 btrfs_put_block_group(cache);
3146                 bytenr += len;
3147                 num -= len;
3148         }
3149         return 0;
3150 }
3151
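/*
 * Copy every currently pinned (EXTENT_DIRTY) range from the fs-wide
 * pinned_extents tree into @copy, presumably so the caller can walk a
 * stable snapshot while extents continue to be pinned.
 */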
3152 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
3153 {
3154         u64 last = 0;
3155         u64 start;
3156         u64 end;
3157         struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
3158         int ret;
3159
3160         while (1) {
3161                 ret = find_first_extent_bit(pinned_extents, last,
3162                                             &start, &end, EXTENT_DIRTY);
3163                 if (ret)
3164                         break;
3165
3166                 set_extent_dirty(copy, start, end, GFP_NOFS);
3167                 last = end + 1;
3168         }
3169         return 0;
3170 }