/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "inode-map.h"

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                                    int read_only);
static int btrfs_destroy_ordered_operations(struct btrfs_root *root);
static int btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_root *root);
static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                        struct extent_io_tree *dirty_pages,
                                        int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
                                       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_root *root);

/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        int error;
        int metadata;
        struct list_head list;
        struct btrfs_work work;
};

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
        struct inode *inode;
        struct bio *bio;
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_start;
        extent_submit_bio_hook_t *submit_bio_done;
        int rw;
        int mirror_num;
        unsigned long bio_flags;
        /*
         * bio_offset is optional, can be used if the pages in the bio
         * can't tell us where in the file the bio should go
         */
        u64 bio_offset;
        struct btrfs_work work;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
        u64                     id;             /* root objectid */
        const char              *name_stem;     /* lock name stem */
        char                    names[BTRFS_MAX_LEVEL + 1][20];
        struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
        { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
        { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
        { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan"   },
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
        { .id = 0,                              .name_stem = "tree"     },
};

void __init btrfs_init_lockdep(void)
{
        int i, j;

        /* initialize lockdep class names */
        for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
                struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

                for (j = 0; j < ARRAY_SIZE(ks->names); j++)
                        snprintf(ks->names[j], sizeof(ks->names[j]),
                                 "btrfs-%s-%02d", ks->name_stem, j);
        }
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
                                    int level)
{
        struct btrfs_lockdep_keyset *ks;

        BUG_ON(level >= ARRAY_SIZE(ks->keys));

        /* find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock,
                                   &ks->keys[level], ks->names[level]);
}

#endif

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
                struct page *page, size_t pg_offset, u64 start, u64 len,
                int create)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        int ret;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev =
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
                read_unlock(&em_tree->lock);
                goto out;
        }
        read_unlock(&em_tree->lock);

        em = alloc_extent_map();
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_len = (u64)-1;
        em->block_start = 0;
        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
        if (ret == -EEXIST) {
                u64 failed_start = em->start;
                u64 failed_len = em->len;

                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (em) {
                        ret = 0;
                } else {
                        em = lookup_extent_mapping(em_tree, failed_start,
                                                   failed_len);
                        ret = -EIO;
                }
        } else if (ret) {
                free_extent_map(em);
                em = NULL;
        }
        write_unlock(&em_tree->lock);

        if (ret)
                em = ERR_PTR(ret);
out:
        return em;
}

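/*
 * Checksum helpers: btrfs uses crc32c for metadata.  btrfs_csum_data folds
 * a buffer into a running crc32c, and btrfs_csum_final inverts the crc and
 * stores it little-endian into the result buffer.
 */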
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
        return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
        put_unaligned_le32(~crc, result);
}

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
{
        u16 csum_size =
                btrfs_super_csum_size(&root->fs_info->super_copy);
        char *result = NULL;
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;
        unsigned long inline_result;

        len = buf->len - offset;
        while (len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
                                        &kaddr, &map_start, &map_len);
                if (err)
                        return 1;
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(root, kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
        }
        if (csum_size > sizeof(inline_result)) {
                result = kzalloc(csum_size, GFP_NOFS);
                if (!result)
                        return 1;
        } else {
                result = (char *)&inline_result;
        }

        btrfs_csum_final(crc, result);

        if (verify) {
                if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
                        u32 val;
                        u32 found = 0;
                        memcpy(&found, result, csum_size);

                        read_extent_buffer(buf, &val, 0, csum_size);
                        printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
                                       "failed on %llu wanted %X found %X "
                                       "level %d\n",
                                       root->fs_info->sb->s_id,
                                       (unsigned long long)buf->start, val, found,
                                       btrfs_header_level(buf));
                        if (result != (char *)&inline_result)
                                kfree(result);
                        return 1;
                }
        } else {
                write_extent_buffer(buf, result, 0, csum_size);
        }
        if (result != (char *)&inline_result)
                kfree(result);
        return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid)
{
        struct extent_state *cached_state = NULL;
        int ret;

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
                         0, &cached_state, GFP_NOFS);
        if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        printk_ratelimited(KERN_ERR "parent transid verify failed on %llu "
                       "wanted %llu found %llu\n",
                       (unsigned long long)eb->start,
                       (unsigned long long)parent_transid,
                       (unsigned long long)btrfs_header_generation(eb));
        ret = 1;
        clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state, GFP_NOFS);
        return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                                          struct extent_buffer *eb,
                                          u64 start, u64 parent_transid)
{
        struct extent_io_tree *io_tree;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;

        clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, start, 1,
                                               btree_get_extent, mirror_num);
                if (!ret &&
                    !verify_parent_transid(io_tree, eb, parent_transid))
                        return ret;

                /*
                 * This buffer's crc is fine, but its contents are corrupted, so
                 * there is no reason to read the other copies, they won't be
                 * any less wrong.
                 */
                if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
                        return ret;

                num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        return ret;

                mirror_num++;
                if (mirror_num > num_copies)
                        return ret;
        }
        return -EIO;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
        struct extent_io_tree *tree;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 found_start;
        unsigned long len;
        struct extent_buffer *eb;
        int ret;

        tree = &BTRFS_I(page->mapping->host)->io_tree;

        if (page->private == EXTENT_PAGE_PRIVATE) {
                WARN_ON(1);
                goto out;
        }
        if (!page->private) {
                WARN_ON(1);
                goto out;
        }
        len = page->private >> 2;
        WARN_ON(len == 0);

        eb = alloc_extent_buffer(tree, start, len, page);
        if (eb == NULL) {
                WARN_ON(1);
                goto out;
        }
        ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
                                             btrfs_header_generation(eb));
        BUG_ON(ret);
        WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));

        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                WARN_ON(1);
                goto err;
        }
        if (eb->first_page != page) {
                WARN_ON(1);
                goto err;
        }
        if (!PageUptodate(page)) {
                WARN_ON(1);
                goto err;
        }
        csum_tree_block(root, eb, 0);
err:
        free_extent_buffer(eb);
out:
        return 0;
}

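/*
 * return 0 if the fsid in the block header matches this filesystem or one
 * of its seed devices, nonzero otherwise
 */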
static int check_tree_block_fsid(struct btrfs_root *root,
                                 struct extent_buffer *eb)
{
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        u8 fsid[BTRFS_UUID_SIZE];
        int ret = 1;

        read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
                           BTRFS_FSID_SIZE);
        while (fs_devices) {
                if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
                        ret = 0;
                        break;
                }
                fs_devices = fs_devices->seed;
        }
        return ret;
}

#define CORRUPT(reason, eb, root, slot)                         \
        printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
               "root=%llu, slot=%d\n", reason,                  \
               (unsigned long long)btrfs_header_bytenr(eb),     \
               (unsigned long long)root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
                               struct extent_buffer *leaf)
{
        struct btrfs_key key;
        struct btrfs_key leaf_key;
        u32 nritems = btrfs_header_nritems(leaf);
        int slot;

        if (nritems == 0)
                return 0;

        /* Check the 0 item */
        if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
            BTRFS_LEAF_DATA_SIZE(root)) {
                CORRUPT("invalid item offset size pair", leaf, root, 0);
                return -EIO;
        }

        /*
         * Check to make sure each item's keys are in the correct order and
         * their offsets make sense.  We only have to loop through nritems-1
         * because we check the current slot against the next slot, which
         * verifies the next slot's offset+size makes sense and that the
         * current slot's offset is correct.
         */
        for (slot = 0; slot < nritems - 1; slot++) {
                btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
                btrfs_item_key_to_cpu(leaf, &key, slot + 1);

                /* Make sure the keys are in the right order */
                if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
                        CORRUPT("bad key order", leaf, root, slot);
                        return -EIO;
                }

                /*
                 * Make sure the offset and ends are right, remember that the
                 * item data starts at the end of the leaf and grows towards the
                 * front.
                 */
                if (btrfs_item_offset_nr(leaf, slot) !=
                        btrfs_item_end_nr(leaf, slot + 1)) {
                        CORRUPT("slot offset bad", leaf, root, slot);
                        return -EIO;
                }

                /*
                 * Check to make sure that we don't point outside of the leaf,
                 * just in case all the items are consistent with each other,
                 * but all point outside of the leaf.
                 */
                if (btrfs_item_end_nr(leaf, slot) >
                    BTRFS_LEAF_DATA_SIZE(root)) {
                        CORRUPT("slot end outside of leaf", leaf, root, slot);
                        return -EIO;
                }
        }

        return 0;
}

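/*
 * end_io validation for a freshly read tree block: the bytenr recorded in
 * the header must match where we actually read it from, the fsid must
 * belong to this filesystem, the checksum must verify, and leaves get an
 * extra structural sanity check.  Returns 0 on success or -EIO on failure.
 */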
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state)
{
        struct extent_io_tree *tree;
        u64 found_start;
        int found_level;
        unsigned long len;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        int ret = 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;

        len = page->private >> 2;
        WARN_ON(len == 0);

        eb = alloc_extent_buffer(tree, start, len, page);
        if (eb == NULL) {
                ret = -EIO;
                goto out;
        }

        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                printk_ratelimited(KERN_INFO "btrfs bad tree block start "
                               "%llu %llu\n",
                               (unsigned long long)found_start,
                               (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        if (eb->first_page != page) {
                printk(KERN_INFO "btrfs bad first page %lu %lu\n",
                       eb->first_page->index, page->index);
                WARN_ON(1);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(root, eb)) {
                printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
                               (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);

        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
                                       eb, found_level);

        ret = csum_tree_block(root, eb, 1);
        if (ret) {
                ret = -EIO;
                goto err;
        }

        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
        if (found_level == 0 && check_leaf(root, eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }

        end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
        end = eb->start + end - 1;
err:
        free_extent_buffer(eb);
out:
        return ret;
}

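/*
 * bio end_io callback that punts the final processing off to the right
 * worker thread, based on whether this was a read or a write and what kind
 * of metadata the bio carried
 */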
static void end_workqueue_bio(struct bio *bio, int err)
{
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;

        fs_info = end_io_wq->info;
        end_io_wq->error = err;
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;

        if (bio->bi_rw & REQ_WRITE) {
                if (end_io_wq->metadata == 1)
                        btrfs_queue_worker(&fs_info->endio_meta_write_workers,
                                           &end_io_wq->work);
                else if (end_io_wq->metadata == 2)
                        btrfs_queue_worker(&fs_info->endio_freespace_worker,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_write_workers,
                                           &end_io_wq->work);
        } else {
                if (end_io_wq->metadata)
                        btrfs_queue_worker(&fs_info->endio_meta_workers,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_workers,
                                           &end_io_wq->work);
        }
}

/*
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata)
{
        struct end_io_wq *end_io_wq;
        end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
        if (!end_io_wq)
                return -ENOMEM;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->error = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}

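/*
 * upper bound on in-flight async submit bios, scaled by the worker thread
 * count and the number of open devices
 */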
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
        unsigned long limit = min_t(unsigned long,
                                    info->workers.max_workers,
                                    info->fs_devices->open_devices);
        return 256 * limit;
}

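/*
 * run_one_async_start/done/free implement the three phases of an async bio
 * submission: checksumming on a worker thread, ordered completion (which
 * also wakes up throttled submitters), and freeing the tracking struct
 */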
static void run_one_async_start(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        async->submit_bio_start(async->inode, async->rw, async->bio,
                               async->mirror_num, async->bio_flags,
                               async->bio_offset);
}

static void run_one_async_done(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;
        int limit;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;

        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

        atomic_dec(&fs_info->nr_async_submits);

        if (atomic_read(&fs_info->nr_async_submits) < limit &&
            waitqueue_active(&fs_info->async_submit_wait))
                wake_up(&fs_info->async_submit_wait);

        async->submit_bio_done(async->inode, async->rw, async->bio,
                               async->mirror_num, async->bio_flags,
                               async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        kfree(async);
}

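/*
 * queue a bio for async submission: checksumming runs on the worker
 * threads via submit_bio_start, and the actual device submission happens
 * in submit_bio_done.  Synchronous bios get high priority, and callers are
 * throttled while async submissions are being drained.
 */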
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        int rw, struct bio *bio, int mirror_num,
                        unsigned long bio_flags,
                        u64 bio_offset,
                        extent_submit_bio_hook_t *submit_bio_start,
                        extent_submit_bio_hook_t *submit_bio_done)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return -ENOMEM;

        async->inode = inode;
        async->rw = rw;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;
        async->submit_bio_done = submit_bio_done;

        async->work.func = run_one_async_start;
        async->work.ordered_func = run_one_async_done;
        async->work.ordered_free = run_one_async_free;

        async->work.flags = 0;
        async->bio_flags = bio_flags;
        async->bio_offset = bio_offset;

        atomic_inc(&fs_info->nr_async_submits);

        if (rw & REQ_SYNC)
                btrfs_set_work_high_prio(&async->work);

        btrfs_queue_worker(&fs_info->workers, &async->work);

        while (atomic_read(&fs_info->async_submit_draining) &&
              atomic_read(&fs_info->nr_async_submits)) {
                wait_event(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_submits) == 0));
        }

        return 0;
}

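/* checksum every metadata page in this bio before it is submitted */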
static int btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int bio_index = 0;
        struct btrfs_root *root;

        WARN_ON(bio->bi_vcnt <= 0);
        while (bio_index < bio->bi_vcnt) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                csum_dirty_buffer(root, bvec->bv_page);
                bio_index++;
                bvec++;
        }
        return 0;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags,
                                    u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just do the checksumming here; the bio is
         * mapped and submitted later by __btree_submit_bio_done
         */
        btree_csum_one_bio(bio);
        return 0;
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        int ret;

        ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
                                          bio, 1);
        BUG_ON(ret);

        if (!(rw & REQ_WRITE)) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
                return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                     mirror_num, 0);
        }

        /*
         * kthread helpers are used to submit writes so that checksumming
         * can happen in parallel across all CPUs
         */
        return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num, 0,
                                   bio_offset,
                                   __btree_submit_bio_start,
                                   __btree_submit_bio_done);
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
                        struct page *newpage, struct page *page)
{
        /*
         * we can't safely write a btree page from here,
         * we haven't done the locking hook
         */
        if (PageDirty(page))
                return -EAGAIN;
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
        return migrate_page(mapping, newpage, page);
}
#endif

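/*
 * write a single dirty btree page.  When called from memory reclaim
 * (PF_MEMALLOC), the page is redirtied and skipped instead, since it is
 * not safe to write a btree page from that context (see the comment in
 * btree_migratepage about the locking hook).
 */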
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct extent_buffer *eb;
        int was_dirty;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        if (!(current->flags & PF_MEMALLOC)) {
                return extent_write_full_page(tree, page,
                                              btree_get_extent, wbc);
        }

        redirty_page_for_writepage(wbc, page);
        eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
        WARN_ON(!eb);

        was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
        if (!was_dirty) {
                spin_lock(&root->fs_info->delalloc_lock);
                root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
                spin_unlock(&root->fs_info->delalloc_lock);
        }
        free_extent_buffer(eb);

        unlock_page(page);
        return 0;
}

static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(mapping->host)->io_tree;
        if (wbc->sync_mode == WB_SYNC_NONE) {
                struct btrfs_root *root = BTRFS_I(mapping->host)->root;
                u64 num_dirty;
                unsigned long thresh = 32 * 1024 * 1024;

                if (wbc->for_kupdate)
                        return 0;

                /* this is a bit racy, but that's ok */
                num_dirty = root->fs_info->dirty_metadata_bytes;
                if (num_dirty < thresh)
                        return 0;
        }
        return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        struct extent_io_tree *tree;
        struct extent_map_tree *map;
        int ret;

        if (PageWriteback(page) || PageDirty(page))
                return 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        map = &BTRFS_I(page->mapping->host)->extent_tree;

        ret = try_release_extent_state(map, tree, page, gfp_flags);
        if (!ret)
                return 0;

        ret = try_release_extent_buffer(tree, page);
        if (ret == 1) {
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }

        return ret;
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                printk(KERN_WARNING "btrfs warning page private not zero "
                       "on page %llu\n", (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
}

static const struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepage      = btree_writepage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = btree_migratepage,
#endif
};

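/*
 * start a readahead of a single tree block.  The read is not waited on and
 * errors are ignored; this is purely an optimization.
 */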
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        int ret = 0;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, 0, btree_get_extent, 0);
        free_extent_buffer(buf);
        return ret;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;
        eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                bytenr, blocksize);
        return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;

        eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                 bytenr, blocksize, NULL);
        return eb;
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
        return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
                                        buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
        return filemap_fdatawait_range(buf->first_page->mapping,
                                       buf->start, buf->start + buf->len - 1);
}

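/*
 * read a tree block at the given bytenr, verifying it against
 * parent_transid.  The buffer is returned even if the read fails; only
 * buffers that read back cleanly are marked uptodate.
 */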
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u32 blocksize, u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        int ret;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return NULL;

        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);

        if (ret == 0)
                set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
        return buf;
}

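/*
 * clear the dirty state of a tree block that was dirtied inside the
 * running transaction so it will not be written back, and keep the
 * dirty_metadata_bytes accounting in sync
 */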
int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct extent_buffer *buf)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        if (btrfs_header_generation(buf) ==
            root->fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);

                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
                        spin_lock(&root->fs_info->delalloc_lock);
                        if (root->fs_info->dirty_metadata_bytes >= buf->len)
                                root->fs_info->dirty_metadata_bytes -= buf->len;
                        else
                                WARN_ON(1);
                        spin_unlock(&root->fs_info->delalloc_lock);
                }

                /* ugh, clear_extent_buffer_dirty needs to lock the page */
                btrfs_set_lock_blocking(buf);
                clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
        }
        return 0;
}

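/* initialize the in-memory fields of a btrfs_root; no disk IO is done here */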
static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
                        u32 stripesize, struct btrfs_root *root,
                        struct btrfs_fs_info *fs_info,
                        u64 objectid)
{
        root->node = NULL;
        root->commit_root = NULL;
        root->sectorsize = sectorsize;
        root->nodesize = nodesize;
        root->leafsize = leafsize;
        root->stripesize = stripesize;
        root->ref_cows = 0;
        root->track_dirty = 0;
        root->in_radix = 0;
        root->orphan_item_inserted = 0;
        root->orphan_cleanup_state = 0;

        root->fs_info = fs_info;
        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_objectid = 0;
        root->name = NULL;
        root->inode_tree = RB_ROOT;
        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;
        root->orphan_block_rsv = NULL;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->orphan_list);
        INIT_LIST_HEAD(&root->root_list);
        spin_lock_init(&root->orphan_lock);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->accounting_lock);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        root->log_batch = 0;
        root->log_transid = 0;
        root->last_log_commit = 0;
        extent_io_tree_init(&root->dirty_log_pages,
                             fs_info->btree_inode->i_mapping);

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        root->defrag_trans_start = fs_info->generation;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
        root->root_key.objectid = objectid;
        root->anon_dev = 0;
        return 0;
}

static int find_and_setup_root(struct btrfs_root *tree_root,
                               struct btrfs_fs_info *fs_info,
                               u64 objectid,
                               struct btrfs_root *root)
{
        int ret;
        u32 blocksize;
        u64 generation;

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        if (ret > 0)
                return -ENOENT;
        BUG_ON(ret);

        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
                free_extent_buffer(root->node);
                return -EIO;
        }
        root->commit_root = btrfs_root_node(root);
        return 0;
}

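/* allocate an empty log tree root and its initial leaf block */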
static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct extent_buffer *leaf;

        root = kzalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
        /*
         * log trees do not get reference counted because they go away
         * before a real commit is actually done.  They do store pointers
         * to file data extents, and those reference counts still get
         * updated (along with back refs to the log tree).
         */
        root->ref_cows = 0;

        leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                      BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
        }

        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
        btrfs_set_header_bytenr(leaf, leaf->start);
        btrfs_set_header_generation(leaf, trans->transid);
        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
        root->node = leaf;

        write_extent_buffer(root->node, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(root->node),
                            BTRFS_FSID_SIZE);
        btrfs_mark_buffer_dirty(root->node);
        btrfs_tree_unlock(root->node);
        return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *log_root;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);
        WARN_ON(fs_info->log_root_tree);
        fs_info->log_root_tree = log_root;
        return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        struct btrfs_root *log_root;
        struct btrfs_inode_item *inode_item;

        log_root = alloc_log_tree(trans, root->fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        log_root->last_trans = trans->transid;
        log_root->root_key.offset = root->root_key.objectid;

        inode_item = &log_root->root_item.inode;
        inode_item->generation = cpu_to_le64(1);
        inode_item->size = cpu_to_le64(3);
        inode_item->nlink = cpu_to_le32(1);
        inode_item->nbytes = cpu_to_le64(root->leafsize);
        inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

        btrfs_set_root_node(&log_root->root_item, log_root->node);

        WARN_ON(root->log_root);
        root->log_root = log_root;
        root->log_transid = 0;
        root->last_log_commit = 0;
        return 0;
}

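/*
 * read a root from the root tree by key, without consulting or updating
 * the fs_roots_radix cache
 */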
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
                                               struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_fs_info *fs_info = tree_root->fs_info;
        struct btrfs_path *path;
        struct extent_buffer *l;
        u64 generation;
        u32 blocksize;
        int ret = 0;

        root = kzalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto out;
        }

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, location->objectid);

        path = btrfs_alloc_path();
        if (!path) {
                kfree(root);
                return ERR_PTR(-ENOMEM);
        }
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret == 0) {
                l = path->nodes[0];
                read_extent_buffer(l, &root->root_item,
                                btrfs_item_ptr_offset(l, path->slots[0]),
                                sizeof(root->root_item));
                memcpy(&root->root_key, location, sizeof(*location));
        }
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                if (ret > 0)
                        ret = -ENOENT;
                return ERR_PTR(ret);
        }

        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        root->commit_root = btrfs_root_node(root);
        BUG_ON(!root->node);
out:
        if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
                root->ref_cows = 1;
                btrfs_check_and_init_root_item(&root->root_item);
        }

        return root;
}

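/*
 * look up a root by location: the permanent roots are returned directly,
 * everything else is served from the fs_roots_radix cache or read from
 * disk and inserted into it
 */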
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                                              struct btrfs_key *location)
{
        struct btrfs_root *root;
        int ret;

        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;
        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
                return fs_info->chunk_root;
        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
                return fs_info->dev_root;
        if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
                return fs_info->csum_root;
again:
        spin_lock(&fs_info->fs_roots_radix_lock);
        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
        spin_unlock(&fs_info->fs_roots_radix_lock);
        if (root)
                return root;

        root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
        if (IS_ERR(root))
                return root;

        root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
        root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
                                        GFP_NOFS);
        if (!root->free_ino_pinned || !root->free_ino_ctl) {
                ret = -ENOMEM;
                goto fail;
        }

        btrfs_init_free_ino_ctl(root);
        mutex_init(&root->fs_commit_mutex);
        spin_lock_init(&root->cache_lock);
        init_waitqueue_head(&root->cache_wait);

        ret = get_anon_bdev(&root->anon_dev);
        if (ret)
                goto fail;

        if (btrfs_root_refs(&root->root_item) == 0) {
                ret = -ENOENT;
                goto fail;
        }

        ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
        if (ret < 0)
                goto fail;
        if (ret == 0)
                root->orphan_item_inserted = 1;

        ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
        if (ret)
                goto fail;

        spin_lock(&fs_info->fs_roots_radix_lock);
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret == 0)
                root->in_radix = 1;

        spin_unlock(&fs_info->fs_roots_radix_lock);
        radix_tree_preload_end();
        if (ret) {
                if (ret == -EEXIST) {
                        free_fs_root(root);
                        goto again;
                }
                goto fail;
        }

        ret = btrfs_find_dead_roots(fs_info->tree_root,
                                    root->root_key.objectid);
        WARN_ON(ret);
        return root;
fail:
        free_fs_root(root);
        return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
        int ret = 0;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
                if (!device->bdev)
                        continue;
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi && bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}

/*
 * If this fails, caller must call bdi_destroy() to get rid of the
 * bdi again.
 */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
        int err;

        bdi->capabilities = BDI_CAP_MAP_COPY;
        err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
        if (err)
                return err;

        bdi->ra_pages   = default_backing_dev_info.ra_pages;
        bdi->congested_fn       = btrfs_congested_fn;
        bdi->congested_data     = info;
        return 0;
}

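/*
 * return nonzero when the full tree block touched by this bio can be
 * checksummed: either the block is entirely contained in the bio, or the
 * remaining range is already up to date in the io tree
 */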
1428 static int bio_ready_for_csum(struct bio *bio)
1429 {
1430         u64 length = 0;
1431         u64 buf_len = 0;
1432         u64 start = 0;
1433         struct page *page;
1434         struct extent_io_tree *io_tree = NULL;
1435         struct bio_vec *bvec;
1436         int i;
1437         int ret;
1438
1439         bio_for_each_segment(bvec, bio, i) {
1440                 page = bvec->bv_page;
1441                 if (page->private == EXTENT_PAGE_PRIVATE) {
1442                         length += bvec->bv_len;
1443                         continue;
1444                 }
1445                 if (!page->private) {
1446                         length += bvec->bv_len;
1447                         continue;
1448                 }
1449                 length = bvec->bv_len;
1450                 buf_len = page->private >> 2;
1451                 start = page_offset(page) + bvec->bv_offset;
1452                 io_tree = &BTRFS_I(page->mapping->host)->io_tree;
1453         }
1454         /* are we fully contained in this bio? */
1455         if (buf_len <= length)
1456                 return 1;
1457
1458         ret = extent_range_uptodate(io_tree, start + length,
1459                                     start + buf_len - 1);
1460         return ret;
1461 }
1462
1463 /*
1464  * Called by the kthread helper functions to finally call the bio end_io
1465  * functions.  This is where read checksum verification actually happens.
1466  */
1467 static void end_workqueue_fn(struct btrfs_work *work)
1468 {
1469         struct bio *bio;
1470         struct end_io_wq *end_io_wq;
1471         struct btrfs_fs_info *fs_info;
1472         int error;
1473
1474         end_io_wq = container_of(work, struct end_io_wq, work);
1475         bio = end_io_wq->bio;
1476         fs_info = end_io_wq->info;
1477
1478         /* metadata bio reads are special because the whole tree block must
1479          * be checksummed at once.  This makes sure the entire block is in
1480          * RAM and up to date before trying to verify things.  For
1481          * blocksize <= pagesize, it is basically a no-op.
1482          */
1483         if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
1484             !bio_ready_for_csum(bio)) {
1485                 btrfs_queue_worker(&fs_info->endio_meta_workers,
1486                                    &end_io_wq->work);
1487                 return;
1488         }
1489         error = end_io_wq->error;
1490         bio->bi_private = end_io_wq->private;
1491         bio->bi_end_io = end_io_wq->end_io;
1492         kfree(end_io_wq);
1493         bio_endio(bio, error);
1494 }
1495
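/*
 * Background thread that, while the filesystem is writable, runs the
 * delayed iputs, cleans old snapshots and defrags inodes, then sleeps
 * until it is woken again (typically by the transaction kthread).
 */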
1496 static int cleaner_kthread(void *arg)
1497 {
1498         struct btrfs_root *root = arg;
1499
1500         do {
1501                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1502
1503                 if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1504                     mutex_trylock(&root->fs_info->cleaner_mutex)) {
1505                         btrfs_run_delayed_iputs(root);
1506                         btrfs_clean_old_snapshots(root);
1507                         mutex_unlock(&root->fs_info->cleaner_mutex);
1508                         btrfs_run_defrag_inodes(root->fs_info);
1509                 }
1510
1511                 if (freezing(current)) {
1512                         refrigerator();
1513                 } else {
1514                         set_current_state(TASK_INTERRUPTIBLE);
1515                         if (!kthread_should_stop())
1516                                 schedule();
1517                         __set_current_state(TASK_RUNNING);
1518                 }
1519         } while (!kthread_should_stop());
1520         return 0;
1521 }
1522
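/*
 * Background thread that periodically commits the running transaction.
 * A transaction is left alone while it is unblocked and less than 30
 * seconds old; otherwise it is joined and, if still current, committed.
 */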
1523 static int transaction_kthread(void *arg)
1524 {
1525         struct btrfs_root *root = arg;
1526         struct btrfs_trans_handle *trans;
1527         struct btrfs_transaction *cur;
1528         u64 transid;
1529         unsigned long now;
1530         unsigned long delay;
1531         int ret;
1532
1533         do {
1534                 delay = HZ * 30;
1535                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1536                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1537
1538                 spin_lock(&root->fs_info->trans_lock);
1539                 cur = root->fs_info->running_transaction;
1540                 if (!cur) {
1541                         spin_unlock(&root->fs_info->trans_lock);
1542                         goto sleep;
1543                 }
1544
1545                 now = get_seconds();
1546                 if (!cur->blocked &&
1547                     (now < cur->start_time || now - cur->start_time < 30)) {
1548                         spin_unlock(&root->fs_info->trans_lock);
1549                         delay = HZ * 5;
1550                         goto sleep;
1551                 }
1552                 transid = cur->transid;
1553                 spin_unlock(&root->fs_info->trans_lock);
1554
1555                 trans = btrfs_join_transaction(root);
1556                 BUG_ON(IS_ERR(trans));
1557                 if (transid == trans->transid) {
1558                         ret = btrfs_commit_transaction(trans, root);
1559                         BUG_ON(ret);
1560                 } else {
1561                         btrfs_end_transaction(trans, root);
1562                 }
1563 sleep:
1564                 wake_up_process(root->fs_info->cleaner_kthread);
1565                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1566
1567                 if (freezing(current)) {
1568                         refrigerator();
1569                 } else {
1570                         set_current_state(TASK_INTERRUPTIBLE);
1571                         if (!kthread_should_stop() &&
1572                             !btrfs_transaction_blocked(root->fs_info))
1573                                 schedule_timeout(delay);
1574                         __set_current_state(TASK_RUNNING);
1575                 }
1576         } while (!kthread_should_stop());
1577         return 0;
1578 }
1579
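/*
 * Mount-time setup: read the super block, initialize fs_info and the
 * worker threads, load the chunk, root, extent, dev and csum trees,
 * start the cleaner and transaction kthreads, replay the tree log if
 * one exists, and hand back the tree root.
 */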
1580 struct btrfs_root *open_ctree(struct super_block *sb,
1581                               struct btrfs_fs_devices *fs_devices,
1582                               char *options)
1583 {
1584         u32 sectorsize;
1585         u32 nodesize;
1586         u32 leafsize;
1587         u32 blocksize;
1588         u32 stripesize;
1589         u64 generation;
1590         u64 features;
1591         struct btrfs_key location;
1592         struct buffer_head *bh;
1593         struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
1594                                                  GFP_NOFS);
1595         struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1596                                                  GFP_NOFS);
1597         struct btrfs_root *tree_root = btrfs_sb(sb);
1598         struct btrfs_fs_info *fs_info = NULL;
1599         struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
1600                                                 GFP_NOFS);
1601         struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
1602                                               GFP_NOFS);
1603         struct btrfs_root *log_tree_root;
1604
1605         int ret;
1606         int err = -EINVAL;
1607
1608         struct btrfs_super_block *disk_super;
1609
1610         if (!extent_root || !tree_root || !tree_root->fs_info ||
1611             !chunk_root || !dev_root || !csum_root) {
1612                 err = -ENOMEM;
1613                 goto fail;
1614         }
1615         fs_info = tree_root->fs_info;
1616
1617         ret = init_srcu_struct(&fs_info->subvol_srcu);
1618         if (ret) {
1619                 err = ret;
1620                 goto fail;
1621         }
1622
1623         ret = setup_bdi(fs_info, &fs_info->bdi);
1624         if (ret) {
1625                 err = ret;
1626                 goto fail_srcu;
1627         }
1628
1629         fs_info->btree_inode = new_inode(sb);
1630         if (!fs_info->btree_inode) {
1631                 err = -ENOMEM;
1632                 goto fail_bdi;
1633         }
1634
1635         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
1636
1637         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
1638         INIT_LIST_HEAD(&fs_info->trans_list);
1639         INIT_LIST_HEAD(&fs_info->dead_roots);
1640         INIT_LIST_HEAD(&fs_info->delayed_iputs);
1641         INIT_LIST_HEAD(&fs_info->hashers);
1642         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1643         INIT_LIST_HEAD(&fs_info->ordered_operations);
1644         INIT_LIST_HEAD(&fs_info->caching_block_groups);
1645         spin_lock_init(&fs_info->delalloc_lock);
1646         spin_lock_init(&fs_info->trans_lock);
1647         spin_lock_init(&fs_info->ref_cache_lock);
1648         spin_lock_init(&fs_info->fs_roots_radix_lock);
1649         spin_lock_init(&fs_info->delayed_iput_lock);
1650         spin_lock_init(&fs_info->defrag_inodes_lock);
1651         mutex_init(&fs_info->reloc_mutex);
1652
1653         init_completion(&fs_info->kobj_unregister);
1654         fs_info->tree_root = tree_root;
1655         fs_info->extent_root = extent_root;
1656         fs_info->csum_root = csum_root;
1657         fs_info->chunk_root = chunk_root;
1658         fs_info->dev_root = dev_root;
1659         fs_info->fs_devices = fs_devices;
1660         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1661         INIT_LIST_HEAD(&fs_info->space_info);
1662         btrfs_mapping_init(&fs_info->mapping_tree);
1663         btrfs_init_block_rsv(&fs_info->global_block_rsv);
1664         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
1665         btrfs_init_block_rsv(&fs_info->trans_block_rsv);
1666         btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
1667         btrfs_init_block_rsv(&fs_info->empty_block_rsv);
1668         INIT_LIST_HEAD(&fs_info->durable_block_rsv_list);
1669         mutex_init(&fs_info->durable_block_rsv_mutex);
1670         atomic_set(&fs_info->nr_async_submits, 0);
1671         atomic_set(&fs_info->async_delalloc_pages, 0);
1672         atomic_set(&fs_info->async_submit_draining, 0);
1673         atomic_set(&fs_info->nr_async_bios, 0);
1674         atomic_set(&fs_info->defrag_running, 0);
1675         fs_info->sb = sb;
1676         fs_info->max_inline = 8192 * 1024;
1677         fs_info->metadata_ratio = 0;
1678         fs_info->defrag_inodes = RB_ROOT;
1679         fs_info->trans_no_join = 0;
1680
1681         fs_info->thread_pool_size = min_t(unsigned long,
1682                                           num_online_cpus() + 2, 8);
1683
1684         INIT_LIST_HEAD(&fs_info->ordered_extents);
1685         spin_lock_init(&fs_info->ordered_extent_lock);
1686         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
1687                                         GFP_NOFS);
1688         if (!fs_info->delayed_root) {
1689                 err = -ENOMEM;
1690                 goto fail_iput;
1691         }
1692         btrfs_init_delayed_root(fs_info->delayed_root);
1693
1694         mutex_init(&fs_info->scrub_lock);
1695         atomic_set(&fs_info->scrubs_running, 0);
1696         atomic_set(&fs_info->scrub_pause_req, 0);
1697         atomic_set(&fs_info->scrubs_paused, 0);
1698         atomic_set(&fs_info->scrub_cancel_req, 0);
1699         init_waitqueue_head(&fs_info->scrub_pause_wait);
1700         init_rwsem(&fs_info->scrub_super_lock);
1701         fs_info->scrub_workers_refcnt = 0;
1702
1703         sb->s_blocksize = 4096;
1704         sb->s_blocksize_bits = blksize_bits(4096);
1705         sb->s_bdi = &fs_info->bdi;
1706
1707         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
1708         fs_info->btree_inode->i_nlink = 1;
1709         /*
1710          * we set the i_size on the btree inode to the max possible offset.
1711          * the real end of the address space is determined by all of
1712          * the devices in the system.
1713          */
1714         fs_info->btree_inode->i_size = OFFSET_MAX;
1715         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1716         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1717
1718         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
1719         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1720                              fs_info->btree_inode->i_mapping);
1721         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
1722
1723         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1724
1725         BTRFS_I(fs_info->btree_inode)->root = tree_root;
1726         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1727                sizeof(struct btrfs_key));
1728         BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
1729         insert_inode_hash(fs_info->btree_inode);
1730
1731         spin_lock_init(&fs_info->block_group_cache_lock);
1732         fs_info->block_group_cache_tree = RB_ROOT;
1733
1734         extent_io_tree_init(&fs_info->freed_extents[0],
1735                              fs_info->btree_inode->i_mapping);
1736         extent_io_tree_init(&fs_info->freed_extents[1],
1737                              fs_info->btree_inode->i_mapping);
1738         fs_info->pinned_extents = &fs_info->freed_extents[0];
1739         fs_info->do_barriers = 1;
1740
1741
1742         mutex_init(&fs_info->ordered_operations_mutex);
1743         mutex_init(&fs_info->tree_log_mutex);
1744         mutex_init(&fs_info->chunk_mutex);
1745         mutex_init(&fs_info->transaction_kthread_mutex);
1746         mutex_init(&fs_info->cleaner_mutex);
1747         mutex_init(&fs_info->volume_mutex);
1748         init_rwsem(&fs_info->extent_commit_sem);
1749         init_rwsem(&fs_info->cleanup_work_sem);
1750         init_rwsem(&fs_info->subvol_sem);
1751
1752         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
1753         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
1754
1755         init_waitqueue_head(&fs_info->transaction_throttle);
1756         init_waitqueue_head(&fs_info->transaction_wait);
1757         init_waitqueue_head(&fs_info->transaction_blocked_wait);
1758         init_waitqueue_head(&fs_info->async_submit_wait);
1759
1760         __setup_root(4096, 4096, 4096, 4096, tree_root,
1761                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
1762
1763         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
1764         if (!bh) {
1765                 err = -EINVAL;
1766                 goto fail_alloc;
1767         }
1768
1769         memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
1770         memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
1771                sizeof(fs_info->super_for_commit));
1772         brelse(bh);
1773
1774         memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
1775
1776         disk_super = &fs_info->super_copy;
1777         if (!btrfs_super_root(disk_super))
1778                 goto fail_alloc;
1779
1780         /* check FS state, whether FS is broken. */
1781         fs_info->fs_state |= btrfs_super_flags(disk_super);
1782
1783         btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
1784
1785         /*
1786          * In the long term, we'll store the compression type in the super
1787          * block, and it'll be used for per-file compression control.
1788          */
1789         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
1790
1791         ret = btrfs_parse_options(tree_root, options);
1792         if (ret) {
1793                 err = ret;
1794                 goto fail_alloc;
1795         }
1796
1797         features = btrfs_super_incompat_flags(disk_super) &
1798                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
1799         if (features) {
1800                 printk(KERN_ERR "BTRFS: couldn't mount because of "
1801                        "unsupported optional features (%Lx).\n",
1802                        (unsigned long long)features);
1803                 err = -EINVAL;
1804                 goto fail_alloc;
1805         }
1806
1807         features = btrfs_super_incompat_flags(disk_super);
1808         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
1809         if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
1810                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
1811         btrfs_set_super_incompat_flags(disk_super, features);
1812
1813         features = btrfs_super_compat_ro_flags(disk_super) &
1814                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
1815         if (!(sb->s_flags & MS_RDONLY) && features) {
1816                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
1817                        "unsupported option features (%Lx).\n",
1818                        (unsigned long long)features);
1819                 err = -EINVAL;
1820                 goto fail_alloc;
1821         }
1822
1823         btrfs_init_workers(&fs_info->generic_worker,
1824                            "genwork", 1, NULL);
1825
1826         btrfs_init_workers(&fs_info->workers, "worker",
1827                            fs_info->thread_pool_size,
1828                            &fs_info->generic_worker);
1829
1830         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
1831                            fs_info->thread_pool_size,
1832                            &fs_info->generic_worker);
1833
1834         btrfs_init_workers(&fs_info->submit_workers, "submit",
1835                            min_t(u64, fs_devices->num_devices,
1836                            fs_info->thread_pool_size),
1837                            &fs_info->generic_worker);
1838
1839         btrfs_init_workers(&fs_info->caching_workers, "cache",
1840                            2, &fs_info->generic_worker);
1841
1842         /* a higher idle thresh on the submit workers makes it much more
1843          * likely that bios will be sent down in a sane order to the
1844          * devices.
1845          */
1846         fs_info->submit_workers.idle_thresh = 64;
1847
1848         fs_info->workers.idle_thresh = 16;
1849         fs_info->workers.ordered = 1;
1850
1851         fs_info->delalloc_workers.idle_thresh = 2;
1852         fs_info->delalloc_workers.ordered = 1;
1853
1854         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
1855                            &fs_info->generic_worker);
1856         btrfs_init_workers(&fs_info->endio_workers, "endio",
1857                            fs_info->thread_pool_size,
1858                            &fs_info->generic_worker);
1859         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
1860                            fs_info->thread_pool_size,
1861                            &fs_info->generic_worker);
1862         btrfs_init_workers(&fs_info->endio_meta_write_workers,
1863                            "endio-meta-write", fs_info->thread_pool_size,
1864                            &fs_info->generic_worker);
1865         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
1866                            fs_info->thread_pool_size,
1867                            &fs_info->generic_worker);
1868         btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
1869                            1, &fs_info->generic_worker);
1870         btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
1871                            fs_info->thread_pool_size,
1872                            &fs_info->generic_worker);
1873
1874         /*
1875          * endios are largely parallel and should have a very
1876          * low idle thresh
1877          */
1878         fs_info->endio_workers.idle_thresh = 4;
1879         fs_info->endio_meta_workers.idle_thresh = 4;
1880
1881         fs_info->endio_write_workers.idle_thresh = 2;
1882         fs_info->endio_meta_write_workers.idle_thresh = 2;
1883
1884         btrfs_start_workers(&fs_info->workers, 1);
1885         btrfs_start_workers(&fs_info->generic_worker, 1);
1886         btrfs_start_workers(&fs_info->submit_workers, 1);
1887         btrfs_start_workers(&fs_info->delalloc_workers, 1);
1888         btrfs_start_workers(&fs_info->fixup_workers, 1);
1889         btrfs_start_workers(&fs_info->endio_workers, 1);
1890         btrfs_start_workers(&fs_info->endio_meta_workers, 1);
1891         btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
1892         btrfs_start_workers(&fs_info->endio_write_workers, 1);
1893         btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
1894         btrfs_start_workers(&fs_info->delayed_workers, 1);
1895         btrfs_start_workers(&fs_info->caching_workers, 1);
1896
1897         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1898         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
1899                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
1900
1901         nodesize = btrfs_super_nodesize(disk_super);
1902         leafsize = btrfs_super_leafsize(disk_super);
1903         sectorsize = btrfs_super_sectorsize(disk_super);
1904         stripesize = btrfs_super_stripesize(disk_super);
1905         tree_root->nodesize = nodesize;
1906         tree_root->leafsize = leafsize;
1907         tree_root->sectorsize = sectorsize;
1908         tree_root->stripesize = stripesize;
1909
1910         sb->s_blocksize = sectorsize;
1911         sb->s_blocksize_bits = blksize_bits(sectorsize);
1912
1913         if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1914                     sizeof(disk_super->magic))) {
1915                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
1916                 goto fail_sb_buffer;
1917         }
1918
1919         mutex_lock(&fs_info->chunk_mutex);
1920         ret = btrfs_read_sys_array(tree_root);
1921         mutex_unlock(&fs_info->chunk_mutex);
1922         if (ret) {
1923                 printk(KERN_WARNING "btrfs: failed to read the system "
1924                        "array on %s\n", sb->s_id);
1925                 goto fail_sb_buffer;
1926         }
1927
1928         blocksize = btrfs_level_size(tree_root,
1929                                      btrfs_super_chunk_root_level(disk_super));
1930         generation = btrfs_super_chunk_root_generation(disk_super);
1931
1932         __setup_root(nodesize, leafsize, sectorsize, stripesize,
1933                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1934
1935         chunk_root->node = read_tree_block(chunk_root,
1936                                            btrfs_super_chunk_root(disk_super),
1937                                            blocksize, generation);
1938         BUG_ON(!chunk_root->node);
1939         if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
1940                 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
1941                        sb->s_id);
1942                 goto fail_chunk_root;
1943         }
1944         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
1945         chunk_root->commit_root = btrfs_root_node(chunk_root);
1946
1947         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1948            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1949            BTRFS_UUID_SIZE);
1950
1951         mutex_lock(&fs_info->chunk_mutex);
1952         ret = btrfs_read_chunk_tree(chunk_root);
1953         mutex_unlock(&fs_info->chunk_mutex);
1954         if (ret) {
1955                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
1956                        sb->s_id);
1957                 goto fail_chunk_root;
1958         }
1959
1960         btrfs_close_extra_devices(fs_devices);
1961
1962         blocksize = btrfs_level_size(tree_root,
1963                                      btrfs_super_root_level(disk_super));
1964         generation = btrfs_super_generation(disk_super);
1965
1966         tree_root->node = read_tree_block(tree_root,
1967                                           btrfs_super_root(disk_super),
1968                                           blocksize, generation);
1969         if (!tree_root->node)
1970                 goto fail_chunk_root;
1971         if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
1972                 printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
1973                        sb->s_id);
1974                 goto fail_tree_root;
1975         }
1976         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
1977         tree_root->commit_root = btrfs_root_node(tree_root);
1978
1979         ret = find_and_setup_root(tree_root, fs_info,
1980                                   BTRFS_EXTENT_TREE_OBJECTID, extent_root);
1981         if (ret)
1982                 goto fail_tree_root;
1983         extent_root->track_dirty = 1;
1984
1985         ret = find_and_setup_root(tree_root, fs_info,
1986                                   BTRFS_DEV_TREE_OBJECTID, dev_root);
1987         if (ret)
1988                 goto fail_extent_root;
1989         dev_root->track_dirty = 1;
1990
1991         ret = find_and_setup_root(tree_root, fs_info,
1992                                   BTRFS_CSUM_TREE_OBJECTID, csum_root);
1993         if (ret)
1994                 goto fail_dev_root;
1995
1996         csum_root->track_dirty = 1;
1997
1998         fs_info->generation = generation;
1999         fs_info->last_trans_committed = generation;
2000         fs_info->data_alloc_profile = (u64)-1;
2001         fs_info->metadata_alloc_profile = (u64)-1;
2002         fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
2003
2004         ret = btrfs_init_space_info(fs_info);
2005         if (ret) {
2006                 printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2007                 goto fail_block_groups;
2008         }
2009
2010         ret = btrfs_read_block_groups(extent_root);
2011         if (ret) {
2012                 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2013                 goto fail_block_groups;
2014         }
2015
2016         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2017                                                "btrfs-cleaner");
2018         if (IS_ERR(fs_info->cleaner_kthread))
2019                 goto fail_block_groups;
2020
2021         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2022                                                    tree_root,
2023                                                    "btrfs-transaction");
2024         if (IS_ERR(fs_info->transaction_kthread))
2025                 goto fail_cleaner;
2026
2027         if (!btrfs_test_opt(tree_root, SSD) &&
2028             !btrfs_test_opt(tree_root, NOSSD) &&
2029             !fs_info->fs_devices->rotating) {
2030                 printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2031                        "mode\n");
2032                 btrfs_set_opt(fs_info->mount_opt, SSD);
2033         }
2034
2035         /* do not make disk changes in a broken FS */
2036         if (btrfs_super_log_root(disk_super) != 0 &&
2037             !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
2038                 u64 bytenr = btrfs_super_log_root(disk_super);
2039
2040                 if (fs_devices->rw_devices == 0) {
2041                         printk(KERN_WARNING "Btrfs log replay required "
2042                                "on RO media\n");
2043                         err = -EIO;
2044                         goto fail_trans_kthread;
2045                 }
2046                 blocksize =
2047                      btrfs_level_size(tree_root,
2048                                       btrfs_super_log_root_level(disk_super));
2049
2050                 log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
2051                 if (!log_tree_root) {
2052                         err = -ENOMEM;
2053                         goto fail_trans_kthread;
2054                 }
2055
2056                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2057                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2058
2059                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2060                                                       blocksize,
2061                                                       generation + 1);
2062                 ret = btrfs_recover_log_trees(log_tree_root);
2063                 BUG_ON(ret);
2064
2065                 if (sb->s_flags & MS_RDONLY) {
2066                         ret =  btrfs_commit_super(tree_root);
2067                         BUG_ON(ret);
2068                 }
2069         }
2070
2071         ret = btrfs_find_orphan_roots(tree_root);
2072         BUG_ON(ret);
2073
2074         if (!(sb->s_flags & MS_RDONLY)) {
2075                 ret = btrfs_cleanup_fs_roots(fs_info);
2076                 BUG_ON(ret);
2077
2078                 ret = btrfs_recover_relocation(tree_root);
2079                 if (ret < 0) {
2080                         printk(KERN_WARNING
2081                                "btrfs: failed to recover relocation\n");
2082                         err = -EINVAL;
2083                         goto fail_trans_kthread;
2084                 }
2085         }
2086
2087         location.objectid = BTRFS_FS_TREE_OBJECTID;
2088         location.type = BTRFS_ROOT_ITEM_KEY;
2089         location.offset = (u64)-1;
2090
2091         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2092         if (!fs_info->fs_root)
2093                 goto fail_trans_kthread;
2094         if (IS_ERR(fs_info->fs_root)) {
2095                 err = PTR_ERR(fs_info->fs_root);
2096                 goto fail_trans_kthread;
2097         }
2098
2099         if (!(sb->s_flags & MS_RDONLY)) {
2100                 down_read(&fs_info->cleanup_work_sem);
2101                 err = btrfs_orphan_cleanup(fs_info->fs_root);
2102                 if (!err)
2103                         err = btrfs_orphan_cleanup(fs_info->tree_root);
2104                 up_read(&fs_info->cleanup_work_sem);
2105                 if (err) {
2106                         close_ctree(tree_root);
2107                         return ERR_PTR(err);
2108                 }
2109         }
2110
2111         return tree_root;
2112
2113 fail_trans_kthread:
2114         kthread_stop(fs_info->transaction_kthread);
2115 fail_cleaner:
2116         kthread_stop(fs_info->cleaner_kthread);
2117
2118         /*
2119          * make sure we're done with the btree inode before we stop our
2120          * kthreads
2121          */
2122         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2123         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2124
2125 fail_block_groups:
2126         btrfs_free_block_groups(fs_info);
2127         free_extent_buffer(csum_root->node);
2128         free_extent_buffer(csum_root->commit_root);
2129 fail_dev_root:
2130         free_extent_buffer(dev_root->node);
2131         free_extent_buffer(dev_root->commit_root);
2132 fail_extent_root:
2133         free_extent_buffer(extent_root->node);
2134         free_extent_buffer(extent_root->commit_root);
2135 fail_tree_root:
2136         free_extent_buffer(tree_root->node);
2137         free_extent_buffer(tree_root->commit_root);
2138 fail_chunk_root:
2139         free_extent_buffer(chunk_root->node);
2140         free_extent_buffer(chunk_root->commit_root);
2141 fail_sb_buffer:
2142         btrfs_stop_workers(&fs_info->generic_worker);
2143         btrfs_stop_workers(&fs_info->fixup_workers);
2144         btrfs_stop_workers(&fs_info->delalloc_workers);
2145         btrfs_stop_workers(&fs_info->workers);
2146         btrfs_stop_workers(&fs_info->endio_workers);
2147         btrfs_stop_workers(&fs_info->endio_meta_workers);
2148         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2149         btrfs_stop_workers(&fs_info->endio_write_workers);
2150         btrfs_stop_workers(&fs_info->endio_freespace_worker);
2151         btrfs_stop_workers(&fs_info->submit_workers);
2152         btrfs_stop_workers(&fs_info->delayed_workers);
2153         btrfs_stop_workers(&fs_info->caching_workers);
2154 fail_alloc:
2155         kfree(fs_info->delayed_root);
2156 fail_iput:
2157         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2158         iput(fs_info->btree_inode);
2159
2160         btrfs_close_devices(fs_info->fs_devices);
2161         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2162 fail_bdi:
2163         bdi_destroy(&fs_info->bdi);
2164 fail_srcu:
2165         cleanup_srcu_struct(&fs_info->subvol_srcu);
2166 fail:
2167         kfree(extent_root);
2168         kfree(tree_root);
2169         kfree(fs_info);
2170         kfree(chunk_root);
2171         kfree(dev_root);
2172         kfree(csum_root);
2173         return ERR_PTR(err);
2174 }
2175
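/*
 * end_io handler for the super block buffer heads written below: flag
 * the buffer according to the result of the write and drop the
 * reference taken before submit_bh.
 */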
2176 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2177 {
2178         char b[BDEVNAME_SIZE];
2179
2180         if (uptodate) {
2181                 set_buffer_uptodate(bh);
2182         } else {
2183                 printk_ratelimited(KERN_WARNING "lost page write due to "
2184                                         "I/O error on %s\n",
2185                                        bdevname(bh->b_bdev, b));
2186                 /* note, we don't set_buffer_write_io_error because we have
2187                  * our own ways of dealing with the IO errors.
2188                  */
2189                 clear_buffer_uptodate(bh);
2190         }
2191         unlock_buffer(bh);
2192         put_bh(bh);
2193 }
2194
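/*
 * Read the super block from a device and return the buffer head of the
 * newest valid copy found, or NULL if there is none.
 */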
2195 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2196 {
2197         struct buffer_head *bh;
2198         struct buffer_head *latest = NULL;
2199         struct btrfs_super_block *super;
2200         int i;
2201         u64 transid = 0;
2202         u64 bytenr;
2203
2204         /* we would like to check all the supers, but that would make
2205          * a btrfs mount succeed after a mkfs from a different FS.
2206          * So, we need to add a special mount option to scan for
2207          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2208          */
2209         for (i = 0; i < 1; i++) {
2210                 bytenr = btrfs_sb_offset(i);
2211                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2212                         break;
2213                 bh = __bread(bdev, bytenr / 4096, 4096);
2214                 if (!bh)
2215                         continue;
2216
2217                 super = (struct btrfs_super_block *)bh->b_data;
2218                 if (btrfs_super_bytenr(super) != bytenr ||
2219                     strncmp((char *)(&super->magic), BTRFS_MAGIC,
2220                             sizeof(super->magic))) {
2221                         brelse(bh);
2222                         continue;
2223                 }
2224
2225                 if (!latest || btrfs_super_generation(super) > transid) {
2226                         brelse(latest);
2227                         latest = bh;
2228                         transid = btrfs_super_generation(super);
2229                 } else {
2230                         brelse(bh);
2231                 }
2232         }
2233         return latest;
2234 }
2235
2236 /*
2237  * this should be called twice, once with wait == 0 and
2238  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2239  * we write are pinned.
2240  *
2241  * They are released when wait == 1 is done.
2242  * max_mirrors must be the same for both runs, and it indicates how
2243  * many supers on this one device should be written.
2244  *
2245  * max_mirrors == 0 means to write them all.
2246  */
2247 static int write_dev_supers(struct btrfs_device *device,
2248                             struct btrfs_super_block *sb,
2249                             int do_barriers, int wait, int max_mirrors)
2250 {
2251         struct buffer_head *bh;
2252         int i;
2253         int ret;
2254         int errors = 0;
2255         u32 crc;
2256         u64 bytenr;
2257         int last_barrier = 0;
2258
2259         if (max_mirrors == 0)
2260                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2261
2262         /* make sure only the last submit_bh does a barrier */
2263         if (do_barriers) {
2264                 for (i = 0; i < max_mirrors; i++) {
2265                         bytenr = btrfs_sb_offset(i);
2266                         if (bytenr + BTRFS_SUPER_INFO_SIZE >=
2267                             device->total_bytes)
2268                                 break;
2269                         last_barrier = i;
2270                 }
2271         }
2272
2273         for (i = 0; i < max_mirrors; i++) {
2274                 bytenr = btrfs_sb_offset(i);
2275                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2276                         break;
2277
2278                 if (wait) {
2279                         bh = __find_get_block(device->bdev, bytenr / 4096,
2280                                               BTRFS_SUPER_INFO_SIZE);
2281                         BUG_ON(!bh);
2282                         wait_on_buffer(bh);
2283                         if (!buffer_uptodate(bh))
2284                                 errors++;
2285
2286                         /* drop our reference */
2287                         brelse(bh);
2288
2289                         /* drop the reference from the wait == 0 run */
2290                         brelse(bh);
2291                         continue;
2292                 } else {
2293                         btrfs_set_super_bytenr(sb, bytenr);
2294
2295                         crc = ~(u32)0;
2296                         crc = btrfs_csum_data(NULL, (char *)sb +
2297                                               BTRFS_CSUM_SIZE, crc,
2298                                               BTRFS_SUPER_INFO_SIZE -
2299                                               BTRFS_CSUM_SIZE);
2300                         btrfs_csum_final(crc, sb->csum);
2301
2302                         /*
2303                          * one reference for us, and we leave it for the
2304                          * caller
2305                          */
2306                         bh = __getblk(device->bdev, bytenr / 4096,
2307                                       BTRFS_SUPER_INFO_SIZE);
2308                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2309
2310                         /* one reference for submit_bh */
2311                         get_bh(bh);
2312
2313                         set_buffer_uptodate(bh);
2314                         lock_buffer(bh);
2315                         bh->b_end_io = btrfs_end_buffer_write_sync;
2316                 }
2317
2318                 if (i == last_barrier && do_barriers)
2319                         ret = submit_bh(WRITE_FLUSH_FUA, bh);
2320                 else
2321                         ret = submit_bh(WRITE_SYNC, bh);
2322
2323                 if (ret)
2324                         errors++;
2325         }
2326         return errors < i ? 0 : -1;
2327 }
2328
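/*
 * Write the super block to every writable device in the filesystem,
 * using the two-pass (submit, then wait) protocol of write_dev_supers.
 * More than max_errors failed devices is treated as fatal.
 */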
2329 int write_all_supers(struct btrfs_root *root, int max_mirrors)
2330 {
2331         struct list_head *head;
2332         struct btrfs_device *dev;
2333         struct btrfs_super_block *sb;
2334         struct btrfs_dev_item *dev_item;
2335         int ret;
2336         int do_barriers;
2337         int max_errors;
2338         int total_errors = 0;
2339         u64 flags;
2340
2341         max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
2342         do_barriers = !btrfs_test_opt(root, NOBARRIER);
2343
2344         sb = &root->fs_info->super_for_commit;
2345         dev_item = &sb->dev_item;
2346
2347         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2348         head = &root->fs_info->fs_devices->devices;
2349         list_for_each_entry_rcu(dev, head, dev_list) {
2350                 if (!dev->bdev) {
2351                         total_errors++;
2352                         continue;
2353                 }
2354                 if (!dev->in_fs_metadata || !dev->writeable)
2355                         continue;
2356
2357                 btrfs_set_stack_device_generation(dev_item, 0);
2358                 btrfs_set_stack_device_type(dev_item, dev->type);
2359                 btrfs_set_stack_device_id(dev_item, dev->devid);
2360                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
2361                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
2362                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
2363                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
2364                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
2365                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
2366                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
2367
2368                 flags = btrfs_super_flags(sb);
2369                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
2370
2371                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
2372                 if (ret)
2373                         total_errors++;
2374         }
2375         if (total_errors > max_errors) {
2376                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2377                        total_errors);
2378                 BUG();
2379         }
2380
2381         total_errors = 0;
2382         list_for_each_entry_rcu(dev, head, dev_list) {
2383                 if (!dev->bdev)
2384                         continue;
2385                 if (!dev->in_fs_metadata || !dev->writeable)
2386                         continue;
2387
2388                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
2389                 if (ret)
2390                         total_errors++;
2391         }
2392         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2393         if (total_errors > max_errors) {
2394                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2395                        total_errors);
2396                 BUG();
2397         }
2398         return 0;
2399 }
2400
2401 int write_ctree_super(struct btrfs_trans_handle *trans,
2402                       struct btrfs_root *root, int max_mirrors)
2403 {
2404         int ret;
2405
2406         ret = write_all_supers(root, max_mirrors);
2407         return ret;
2408 }
2409
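/*
 * Remove a root from the radix tree, wait out any SRCU readers if the
 * subvolume is being deleted, tear down its free-inode caches and
 * free it.
 */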
2410 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2411 {
2412         spin_lock(&fs_info->fs_roots_radix_lock);
2413         radix_tree_delete(&fs_info->fs_roots_radix,
2414                           (unsigned long)root->root_key.objectid);
2415         spin_unlock(&fs_info->fs_roots_radix_lock);
2416
2417         if (btrfs_root_refs(&root->root_item) == 0)
2418                 synchronize_srcu(&fs_info->subvol_srcu);
2419
2420         __btrfs_remove_free_space_cache(root->free_ino_pinned);
2421         __btrfs_remove_free_space_cache(root->free_ino_ctl);
2422         free_fs_root(root);
2423         return 0;
2424 }
2425
2426 static void free_fs_root(struct btrfs_root *root)
2427 {
2428         iput(root->cache_inode);
2429         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2430         if (root->anon_dev)
2431                 free_anon_bdev(root->anon_dev);
2432         free_extent_buffer(root->node);
2433         free_extent_buffer(root->commit_root);
2434         kfree(root->free_ino_ctl);
2435         kfree(root->free_ino_pinned);
2436         kfree(root->name);
2437         kfree(root);
2438 }
2439
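/*
 * Free every root left on the dead roots list, then every root still
 * in the radix tree.  Used while tearing down the filesystem.
 */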
2440 static int del_fs_roots(struct btrfs_fs_info *fs_info)
2441 {
2442         int ret;
2443         struct btrfs_root *gang[8];
2444         int i;
2445
2446         while (!list_empty(&fs_info->dead_roots)) {
2447                 gang[0] = list_entry(fs_info->dead_roots.next,
2448                                      struct btrfs_root, root_list);
2449                 list_del(&gang[0]->root_list);
2450
2451                 if (gang[0]->in_radix) {
2452                         btrfs_free_fs_root(fs_info, gang[0]);
2453                 } else {
2454                         free_extent_buffer(gang[0]->node);
2455                         free_extent_buffer(gang[0]->commit_root);
2456                         kfree(gang[0]);
2457                 }
2458         }
2459
2460         while (1) {
2461                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2462                                              (void **)gang, 0,
2463                                              ARRAY_SIZE(gang));
2464                 if (!ret)
2465                         break;
2466                 for (i = 0; i < ret; i++)
2467                         btrfs_free_fs_root(fs_info, gang[i]);
2468         }
2469         return 0;
2470 }
2471
2472 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2473 {
2474         u64 root_objectid = 0;
2475         struct btrfs_root *gang[8];
2476         int i;
2477         int ret;
2478
2479         while (1) {
2480                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2481                                              (void **)gang, root_objectid,
2482                                              ARRAY_SIZE(gang));
2483                 if (!ret)
2484                         break;
2485
2486                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
2487                 for (i = 0; i < ret; i++) {
2488                         int err;
2489
2490                         root_objectid = gang[i]->root_key.objectid;
2491                         err = btrfs_orphan_cleanup(gang[i]);
2492                         if (err)
2493                                 return err;
2494                 }
2495                 root_objectid++;
2496         }
2497         return 0;
2498 }
2499
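/*
 * Run the cleaner work once, commit the running transaction twice (the
 * second commit drops the original snapshot) and write out the super
 * blocks.
 */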
2500 int btrfs_commit_super(struct btrfs_root *root)
2501 {
2502         struct btrfs_trans_handle *trans;
2503         int ret;
2504
2505         mutex_lock(&root->fs_info->cleaner_mutex);
2506         btrfs_run_delayed_iputs(root);
2507         btrfs_clean_old_snapshots(root);
2508         mutex_unlock(&root->fs_info->cleaner_mutex);
2509
2510         /* wait until any ongoing cleanup work is done */
2511         down_write(&root->fs_info->cleanup_work_sem);
2512         up_write(&root->fs_info->cleanup_work_sem);
2513
2514         trans = btrfs_join_transaction(root);
2515         if (IS_ERR(trans))
2516                 return PTR_ERR(trans);
2517         ret = btrfs_commit_transaction(trans, root);
2518         BUG_ON(ret);
2519         /* run commit again to drop the original snapshot */
2520         trans = btrfs_join_transaction(root);
2521         if (IS_ERR(trans))
2522                 return PTR_ERR(trans);
2523         btrfs_commit_transaction(trans, root);
2524         ret = btrfs_write_and_wait_transaction(NULL, root);
2525         BUG_ON(ret);
2526
2527         ret = write_ctree_super(NULL, root, 0);
2528         return ret;
2529 }
2530
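/*
 * Unmount path: stop scrub, defrag and the background kthreads, commit
 * the final transaction (or its error-state variant), then tear down
 * the roots, workers and devices.
 */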
2531 int close_ctree(struct btrfs_root *root)
2532 {
2533         struct btrfs_fs_info *fs_info = root->fs_info;
2534         int ret;
2535
2536         fs_info->closing = 1;
2537         smp_mb();
2538
2539         btrfs_scrub_cancel(root);
2540
2541         /* wait for any defraggers to finish */
2542         wait_event(fs_info->transaction_wait,
2543                    (atomic_read(&fs_info->defrag_running) == 0));
2544
2545         /* clear out the rbtree of defraggable inodes */
2546         btrfs_run_defrag_inodes(root->fs_info);
2547
2548         btrfs_put_block_group_cache(fs_info);
2549
2550         /*
2551          * Two situations can leave btrfs flipped read-only by an error:
2552          *
2553          * 1. btrfs flipped read-only somewhere else before
2554          * btrfs_commit_super; sb->s_flags has the MS_RDONLY flag set,
2555          * so btrfs skips writing the sb directly in order to keep the
2556          * ERROR state on disk.
2557          *
2558          * 2. btrfs flipped read-only inside btrfs_commit_super itself;
2559          * in that case btrfs cannot write the sb via btrfs_commit_super,
2560          * and since fs_state has the BTRFS_SUPER_FLAG_ERROR flag set,
2561          * btrfs cleans up all FS resources first and then writes the sb.
2562          */
2563         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2564                 ret = btrfs_commit_super(root);
2565                 if (ret)
2566                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2567         }
2568
2569         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
2570                 ret = btrfs_error_commit_super(root);
2571                 if (ret)
2572                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2573         }
2574
2575         kthread_stop(root->fs_info->transaction_kthread);
2576         kthread_stop(root->fs_info->cleaner_kthread);
2577
2578         fs_info->closing = 2;
2579         smp_mb();
2580
2581         if (fs_info->delalloc_bytes) {
2582                 printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
2583                        (unsigned long long)fs_info->delalloc_bytes);
2584         }
2585         if (fs_info->total_ref_cache_size) {
2586                 printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
2587                        (unsigned long long)fs_info->total_ref_cache_size);
2588         }
2589
2590         free_extent_buffer(fs_info->extent_root->node);
2591         free_extent_buffer(fs_info->extent_root->commit_root);
2592         free_extent_buffer(fs_info->tree_root->node);
2593         free_extent_buffer(fs_info->tree_root->commit_root);
2594         free_extent_buffer(root->fs_info->chunk_root->node);
2595         free_extent_buffer(root->fs_info->chunk_root->commit_root);
2596         free_extent_buffer(root->fs_info->dev_root->node);
2597         free_extent_buffer(root->fs_info->dev_root->commit_root);
2598         free_extent_buffer(root->fs_info->csum_root->node);
2599         free_extent_buffer(root->fs_info->csum_root->commit_root);
2600
2601         btrfs_free_block_groups(root->fs_info);
2602
2603         del_fs_roots(fs_info);
2604
2605         iput(fs_info->btree_inode);
2606         kfree(fs_info->delayed_root);
2607
2608         btrfs_stop_workers(&fs_info->generic_worker);
2609         btrfs_stop_workers(&fs_info->fixup_workers);
2610         btrfs_stop_workers(&fs_info->delalloc_workers);
2611         btrfs_stop_workers(&fs_info->workers);
2612         btrfs_stop_workers(&fs_info->endio_workers);
2613         btrfs_stop_workers(&fs_info->endio_meta_workers);
2614         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2615         btrfs_stop_workers(&fs_info->endio_write_workers);
2616         btrfs_stop_workers(&fs_info->endio_freespace_worker);
2617         btrfs_stop_workers(&fs_info->submit_workers);
2618         btrfs_stop_workers(&fs_info->delayed_workers);
2619         btrfs_stop_workers(&fs_info->caching_workers);
2620
2621         btrfs_close_devices(fs_info->fs_devices);
2622         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2623
2624         bdi_destroy(&fs_info->bdi);
2625         cleanup_srcu_struct(&fs_info->subvol_srcu);
2626
2627         kfree(fs_info->extent_root);
2628         kfree(fs_info->tree_root);
2629         kfree(fs_info->chunk_root);
2630         kfree(fs_info->dev_root);
2631         kfree(fs_info->csum_root);
2632         kfree(fs_info);
2633
2634         return 0;
2635 }
2636
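/*
 * Returns nonzero when the buffer is uptodate and its contents match
 * the transid expected by the parent block.
 */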
2637 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
2638 {
2639         int ret;
2640         struct inode *btree_inode = buf->first_page->mapping->host;
2641
2642         ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
2643                                      NULL);
2644         if (!ret)
2645                 return ret;
2646
2647         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
2648                                     parent_transid);
2649         return !ret;
2650 }
2651
2652 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
2653 {
2654         struct inode *btree_inode = buf->first_page->mapping->host;
2655         return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
2656                                           buf);
2657 }
2658
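/*
 * Mark a tree block dirty.  The caller must hold the tree lock; a block
 * generation that does not match the running transaction only triggers
 * a warning.  Newly dirtied buffers are accounted in
 * dirty_metadata_bytes.
 */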
2659 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2660 {
2661         struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2662         u64 transid = btrfs_header_generation(buf);
2663         struct inode *btree_inode = root->fs_info->btree_inode;
2664         int was_dirty;
2665
2666         btrfs_assert_tree_locked(buf);
2667         if (transid != root->fs_info->generation) {
2668                 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
2669                        "found %llu running %llu\n",
2670                         (unsigned long long)buf->start,
2671                         (unsigned long long)transid,
2672                         (unsigned long long)root->fs_info->generation);
2673                 WARN_ON(1);
2674         }
2675         was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
2676                                             buf);
2677         if (!was_dirty) {
2678                 spin_lock(&root->fs_info->delalloc_lock);
2679                 root->fs_info->dirty_metadata_bytes += buf->len;
2680                 spin_unlock(&root->fs_info->delalloc_lock);
2681         }
2682 }
2683
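/*
 * Throttle callers that dirty btree blocks: balance the delayed items
 * and, once more than 32MB of metadata is dirty, ask the VM to write
 * some of it back.
 */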
2684 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2685 {
2686         /*
2687          * looks as though older kernels can get into trouble with
2688          * this code; they end up stuck in balance_dirty_pages forever
2689          */
2690         u64 num_dirty;
2691         unsigned long thresh = 32 * 1024 * 1024;
2692
2693         if (current->flags & PF_MEMALLOC)
2694                 return;
2695
2696         btrfs_balance_delayed_items(root);
2697
2698         num_dirty = root->fs_info->dirty_metadata_bytes;
2699
2700         if (num_dirty > thresh) {
2701                 balance_dirty_pages_ratelimited_nr(
2702                                    root->fs_info->btree_inode->i_mapping, 1);
2703         }
2704         return;
2705 }
2706
2707 void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2708 {
2709         /*
2710          * looks as though older kernels can get into trouble with
2711          * this code; they end up stuck in balance_dirty_pages forever
2712          */
2713         u64 num_dirty;
2714         unsigned long thresh = 32 * 1024 * 1024;
2715
2716         if (current->flags & PF_MEMALLOC)
2717                 return;
2718
2719         num_dirty = root->fs_info->dirty_metadata_bytes;
2720
2721         if (num_dirty > thresh) {
2722                 balance_dirty_pages_ratelimited_nr(
2723                                    root->fs_info->btree_inode->i_mapping, 1);
2724         }
2725         return;
2726 }
2727
2728 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2729 {
2730         struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2731         int ret;
2732         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2733         if (ret == 0)
2734                 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
2735         return ret;
2736 }
2737
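/*
 * Hook run before a btree page is locked for writeback: take the tree
 * lock on the extent buffer behind the page, mark it written and, if it
 * was dirty, remove it from the dirty metadata accounting.
 */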
2738 int btree_lock_page_hook(struct page *page)
2739 {
2740         struct inode *inode = page->mapping->host;
2741         struct btrfs_root *root = BTRFS_I(inode)->root;
2742         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2743         struct extent_buffer *eb;
2744         unsigned long len;
2745         u64 bytenr = page_offset(page);
2746
2747         if (page->private == EXTENT_PAGE_PRIVATE)
2748                 goto out;
2749
2750         len = page->private >> 2;
2751         eb = find_extent_buffer(io_tree, bytenr, len);
2752         if (!eb)
2753                 goto out;
2754
2755         btrfs_tree_lock(eb);
2756         btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2757
2758         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
2759                 spin_lock(&root->fs_info->delalloc_lock);
2760                 if (root->fs_info->dirty_metadata_bytes >= eb->len)
2761                         root->fs_info->dirty_metadata_bytes -= eb->len;
2762                 else
2763                         WARN_ON(1);
2764                 spin_unlock(&root->fs_info->delalloc_lock);
2765         }
2766
2767         btrfs_tree_unlock(eb);
2768         free_extent_buffer(eb);
2769 out:
2770         lock_page(page);
2771         return 0;
2772 }
2773
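/*
 * Despite the name, this currently only warns when a filesystem that
 * carries the error flag is mounted writable; read-only mounts are left
 * alone.
 */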
2774 static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
2775                               int read_only)
2776 {
2777         if (read_only)
2778                 return;
2779
2780         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
2781                 printk(KERN_WARNING "warning: mounting fs with errors, "
2782                        "running btrfsck is recommended\n");
2783 }
2784
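/*
 * Error-path replacement for a full commit: run the delayed iputs,
 * use the empty down_write/up_write pair on cleanup_work_sem as a
 * barrier for in-flight cleanup work, destroy what is left of the
 * running transactions and then write the super blocks directly.
 */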
2785 int btrfs_error_commit_super(struct btrfs_root *root)
2786 {
2787         int ret;
2788
2789         mutex_lock(&root->fs_info->cleaner_mutex);
2790         btrfs_run_delayed_iputs(root);
2791         mutex_unlock(&root->fs_info->cleaner_mutex);
2792
2793         down_write(&root->fs_info->cleanup_work_sem);
2794         up_write(&root->fs_info->cleanup_work_sem);
2795
2796         /* tear down whatever transaction state is left */
2797         btrfs_cleanup_transaction(root);
2798
2799         ret = write_ctree_super(NULL, root, 0);
2800
2801         return ret;
2802 }
2803
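/*
 * Strip every inode off the fs-wide ordered_operations list and
 * invalidate the inodes of the roots involved.  Like the other
 * btrfs_destroy_* helpers below, this is only called while a broken
 * transaction is being torn down.
 */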
2804 static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
2805 {
2806         struct btrfs_inode *btrfs_inode;
2807         struct list_head splice;
2808
2809         INIT_LIST_HEAD(&splice);
2810
2811         mutex_lock(&root->fs_info->ordered_operations_mutex);
2812         spin_lock(&root->fs_info->ordered_extent_lock);
2813
2814         list_splice_init(&root->fs_info->ordered_operations, &splice);
2815         while (!list_empty(&splice)) {
2816                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
2817                                          ordered_operations);
2818
2819                 list_del_init(&btrfs_inode->ordered_operations);
2820
2821                 btrfs_invalidate_inodes(btrfs_inode->root);
2822         }
2823
2824         spin_unlock(&root->fs_info->ordered_extent_lock);
2825         mutex_unlock(&root->fs_info->ordered_operations_mutex);
2826
2827         return 0;
2828 }
2829
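/*
 * Drop every ordered extent on the fs-wide list.  Setting the ref
 * count to 1 before the final put frees each extent even if other
 * references are still outstanding; that looks deliberate, since
 * nothing may touch these extents once the abort is underway.
 */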
2830 static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
2831 {
2832         struct list_head splice;
2833         struct btrfs_ordered_extent *ordered;
2834         struct inode *inode;
2835
2836         INIT_LIST_HEAD(&splice);
2837
2838         spin_lock(&root->fs_info->ordered_extent_lock);
2839
2840         list_splice_init(&root->fs_info->ordered_extents, &splice);
2841         while (!list_empty(&splice)) {
2842                 ordered = list_entry(splice.next, struct btrfs_ordered_extent,
2843                                      root_extent_list);
2844
2845                 list_del_init(&ordered->root_extent_list);
2846                 atomic_inc(&ordered->refs);
2847
2848                 /* the inode may be getting freed (in sys_unlink path). */
2849                 inode = igrab(ordered->inode);
2850
2851                 spin_unlock(&root->fs_info->ordered_extent_lock);
2852                 if (inode)
2853                         iput(inode);
2854
2855                 atomic_set(&ordered->refs, 1);
2856                 btrfs_put_ordered_extent(ordered);
2857
2858                 spin_lock(&root->fs_info->ordered_extent_lock);
2859         }
2860
2861         spin_unlock(&root->fs_info->ordered_extent_lock);
2862
2863         return 0;
2864 }
2865
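/*
 * Empty the transaction's delayed-ref rbtree.  Nodes are unlinked
 * under delayed_refs->lock; for ref heads the cluster bookkeeping
 * and extent_op are released under the head's mutex, and the lock is
 * dropped around the final put and cond_resched() so a large tree
 * does not hog the CPU.
 */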
2866 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
2867                                       struct btrfs_root *root)
2868 {
2869         struct rb_node *node;
2870         struct btrfs_delayed_ref_root *delayed_refs;
2871         struct btrfs_delayed_ref_node *ref;
2872         int ret = 0;
2873
2874         delayed_refs = &trans->delayed_refs;
2875
2876         spin_lock(&delayed_refs->lock);
2877         if (delayed_refs->num_entries == 0) {
2878                 spin_unlock(&delayed_refs->lock);
2879                 printk(KERN_INFO "delayed_refs has no entries\n");
2880                 return ret;
2881         }
2882
2883         node = rb_first(&delayed_refs->root);
2884         while (node) {
2885                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2886                 node = rb_next(node);
2887
2888                 ref->in_tree = 0;
2889                 rb_erase(&ref->rb_node, &delayed_refs->root);
2890                 delayed_refs->num_entries--;
2891
2892                 atomic_set(&ref->refs, 1);
2893                 if (btrfs_delayed_ref_is_head(ref)) {
2894                         struct btrfs_delayed_ref_head *head;
2895
2896                         head = btrfs_delayed_node_to_head(ref);
2897                         mutex_lock(&head->mutex);
2898                         kfree(head->extent_op);
2899                         delayed_refs->num_heads--;
2900                         if (list_empty(&head->cluster))
2901                                 delayed_refs->num_heads_ready--;
2902                         list_del_init(&head->cluster);
2903                         mutex_unlock(&head->mutex);
2904                 }
2905
2906                 spin_unlock(&delayed_refs->lock);
2907                 btrfs_put_delayed_ref(ref);
2908
2909                 cond_resched();
2910                 spin_lock(&delayed_refs->lock);
2911         }
2912
2913         spin_unlock(&delayed_refs->lock);
2914
2915         return ret;
2916 }
2917
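/*
 * Free the pending snapshots queued on a transaction that is never
 * going to commit.
 */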
2918 static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
2919 {
2920         struct btrfs_pending_snapshot *snapshot;
2921         struct list_head splice;
2922
2923         INIT_LIST_HEAD(&splice);
2924
2925         list_splice_init(&t->pending_snapshots, &splice);
2926
2927         while (!list_empty(&splice)) {
2928                 snapshot = list_entry(splice.next,
2929                                       struct btrfs_pending_snapshot,
2930                                       list);
2931
2932                 list_del_init(&snapshot->list);
2933
2934                 kfree(snapshot);
2935         }
2936
2937         return 0;
2938 }
2939
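/*
 * Empty the fs-wide delalloc_inodes list under delalloc_lock and
 * invalidate the inodes of each root that still had delalloc
 * pending.
 */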
2940 static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
2941 {
2942         struct btrfs_inode *btrfs_inode;
2943         struct list_head splice;
2944
2945         INIT_LIST_HEAD(&splice);
2946
2947         spin_lock(&root->fs_info->delalloc_lock);
2948         list_splice_init(&root->fs_info->delalloc_inodes, &splice);
2949
2950         while (!list_empty(&splice)) {
2951                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
2952                                     delalloc_inodes);
2953
2954                 list_del_init(&btrfs_inode->delalloc_inodes);
2955
2956                 btrfs_invalidate_inodes(btrfs_inode->root);
2957         }
2958
2959         spin_unlock(&root->fs_info->delalloc_lock);
2960
2961         return 0;
2962 }
2963
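/*
 * Throw away every metadata page carrying @mark in @dirty_pages:
 * clear the extent bits, force the matching extent buffers clean,
 * end any leftover writeback and invalidate the pages so none of
 * this metadata is written to a filesystem we have already given up
 * on.  The return value is just the last find_first_extent_bit()
 * result and is currently ignored by the caller.
 */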
2964 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
2965                                         struct extent_io_tree *dirty_pages,
2966                                         int mark)
2967 {
2968         int ret;
2969         struct page *page;
2970         struct inode *btree_inode = root->fs_info->btree_inode;
2971         struct extent_buffer *eb;
2972         u64 start = 0;
2973         u64 end;
2974         u64 offset;
2975         unsigned long index;
2976
2977         while (1) {
2978                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
2979                                             mark);
2980                 if (ret)
2981                         break;
2982
2983                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
2984                 while (start <= end) {
2985                         index = start >> PAGE_CACHE_SHIFT;
2986                         start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
2987                         page = find_get_page(btree_inode->i_mapping, index);
2988                         if (!page)
2989                                 continue;
2990                         offset = page_offset(page);
2991
2992                         spin_lock(&dirty_pages->buffer_lock);
2993                         eb = radix_tree_lookup(
2994                                 &BTRFS_I(page->mapping->host)->io_tree.buffer,
2995                                 offset >> PAGE_CACHE_SHIFT);
2996                         spin_unlock(&dirty_pages->buffer_lock);
2997                         if (eb) {
2998                                 ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
2999                                                          &eb->bflags);
3000                                 atomic_set(&eb->refs, 1);
3001                         }
3002                         if (PageWriteback(page))
3003                                 end_page_writeback(page);
3004
3005                         lock_page(page);
3006                         if (PageDirty(page)) {
3007                                 clear_page_dirty_for_io(page);
3008                                 spin_lock_irq(&page->mapping->tree_lock);
3009                                 radix_tree_tag_clear(&page->mapping->page_tree,
3010                                                         page_index(page),
3011                                                         PAGECACHE_TAG_DIRTY);
3012                                 spin_unlock_irq(&page->mapping->tree_lock);
3013                         }
3014
3015                         page->mapping->a_ops->invalidatepage(page, 0);
3016                         unlock_page(page);
                             /* drop the reference that find_get_page() took */
                             page_cache_release(page);
3017                 }
3018         }
3019
3020         return ret;
3021 }
3022
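/*
 * Unpin everything still marked EXTENT_DIRTY in @pinned_extents,
 * issuing discards first when -o discard is in effect, so the free
 * space accounting stays consistent while the abort completes.
 */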
3023 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3024                                        struct extent_io_tree *pinned_extents)
3025 {
3026         struct extent_io_tree *unpin;
3027         u64 start;
3028         u64 end;
3029         int ret;
3030
3031         unpin = pinned_extents;
3032         while (1) {
3033                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3034                                             EXTENT_DIRTY);
3035                 if (ret)
3036                         break;
3037
3038                 /* honor -o discard even on the error path */
3039                 if (btrfs_test_opt(root, DISCARD))
3040                         ret = btrfs_error_discard_extent(root, start,
3041                                                          end + 1 - start,
3042                                                          NULL);
3043
3044                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3045                 btrfs_error_unpin_extent_range(root, start, end);
3046                 cond_resched();
3047         }
3048
3049         return 0;
3050 }
3051
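/*
 * Forcibly tear down every transaction on fs_info->trans_list after
 * a fatal error.  trans_no_join is raised so no new transaction can
 * start while we work.  For each transaction the ordered operations
 * and extents, delayed refs, pending snapshots and delalloc inodes
 * are destroyed, the commit flags (in_commit, blocked, commit_done)
 * are faked and the wait queues woken so anyone sleeping in the
 * normal commit path gets out, and finally the dirty and pinned
 * extents are dropped and the transaction is freed.
 */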
3052 static int btrfs_cleanup_transaction(struct btrfs_root *root)
3053 {
3054         struct btrfs_transaction *t;
3055         LIST_HEAD(list);
3056
3057         WARN_ON(1);
3058
3059         mutex_lock(&root->fs_info->transaction_kthread_mutex);
3060
3061         spin_lock(&root->fs_info->trans_lock);
3062         list_splice_init(&root->fs_info->trans_list, &list);
3063         root->fs_info->trans_no_join = 1;
3064         spin_unlock(&root->fs_info->trans_lock);
3065
3066         while (!list_empty(&list)) {
3067                 t = list_entry(list.next, struct btrfs_transaction, list);
3070
3071                 btrfs_destroy_ordered_operations(root);
3072
3073                 btrfs_destroy_ordered_extents(root);
3074
3075                 btrfs_destroy_delayed_refs(t, root);
3076
3077                 btrfs_block_rsv_release(root,
3078                                         &root->fs_info->trans_block_rsv,
3079                                         t->dirty_pages.dirty_bytes);
3080
3081                 /* FIXME: cleanup wait for commit */
3082                 t->in_commit = 1;
3083                 t->blocked = 1;
3084                 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3085                         wake_up(&root->fs_info->transaction_blocked_wait);
3086
3087                 t->blocked = 0;
3088                 if (waitqueue_active(&root->fs_info->transaction_wait))
3089                         wake_up(&root->fs_info->transaction_wait);
3090
3091                 t->commit_done = 1;
3092                 if (waitqueue_active(&t->commit_wait))
3093                         wake_up(&t->commit_wait);
3094
3095                 btrfs_destroy_pending_snapshots(t);
3096
3097                 btrfs_destroy_delalloc_inodes(root);
3098
3099                 spin_lock(&root->fs_info->trans_lock);
3100                 root->fs_info->running_transaction = NULL;
3101                 spin_unlock(&root->fs_info->trans_lock);
3102
3103                 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3104                                              EXTENT_DIRTY);
3105
3106                 btrfs_destroy_pinned_extent(root,
3107                                             root->fs_info->pinned_extents);
3108
3109                 atomic_set(&t->use_count, 0);
3110                 list_del_init(&t->list);
3111                 memset(t, 0, sizeof(*t));
3112                 kmem_cache_free(btrfs_transaction_cachep, t);
3113         }
3114
3115         spin_lock(&root->fs_info->trans_lock);
3116         root->fs_info->trans_no_join = 0;
3117         spin_unlock(&root->fs_info->trans_lock);
3118         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3119
3120         return 0;
3121 }
3122
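/*
 * The extent_io hooks for the btree inode; these wire the generic
 * extent_io code to the metadata-specific locking, checksum and bio
 * submission paths in this file.
 */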
3123 static struct extent_io_ops btree_extent_io_ops = {
3124         .write_cache_pages_lock_hook = btree_lock_page_hook,
3125         .readpage_end_io_hook = btree_readpage_end_io_hook,
3126         .submit_bio_hook = btree_submit_bio_hook,
3127         /* note we're sharing with inode.c for the merge bio hook */
3128         .merge_bio_hook = btrfs_merge_bio_hook,
3129 };