/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"

#define BTRFS_ROOT_TRANS_TAG 0

void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(transaction->delayed_refs.root.rb_node);
                WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

static noinline void switch_commit_root(struct btrfs_root *root)
{
        free_extent_buffer(root->commit_root);
        root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
        struct btrfs_transaction *cur_trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
loop:
        /* The file system has been taken offline. No new transactions. */
        if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }

        if (fs_info->trans_no_join) {
                if (!nofail) {
                        spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
        }

        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                atomic_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                cur_trans->num_joined++;
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&fs_info->trans_lock);

        cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&fs_info->trans_lock);
        if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the trans_no_join checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                cur_trans = fs_info->running_transaction;
                goto loop;
        } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                spin_unlock(&fs_info->trans_lock);
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                return -EROFS;
        }

        atomic_set(&cur_trans->num_writers, 1);
        cur_trans->num_joined = 0;
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        cur_trans->in_commit = 0;
        cur_trans->blocked = 0;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        atomic_set(&cur_trans->use_count, 2);
        cur_trans->commit_done = 0;
        cur_trans->start_time = get_seconds();

        cur_trans->delayed_refs.root = RB_ROOT;
        cur_trans->delayed_refs.num_entries = 0;
        cur_trans->delayed_refs.num_heads_ready = 0;
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;
        cur_trans->delayed_refs.seq = 1;

        /*
         * although the tree mod log is per file system and not per transaction,
         * the log must never go across transaction boundaries.
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
                        "creating a fresh transaction\n");
                WARN_ON(1);
        }
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
                printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
                        "creating a fresh transaction\n");
                WARN_ON(1);
        }
        atomic_set(&fs_info->tree_mod_seq, 0);

        init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
        spin_lock_init(&cur_trans->commit_lock);
        spin_lock_init(&cur_trans->delayed_refs.lock);
        INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                             fs_info->btree_inode->i_mapping);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
        spin_unlock(&fs_info->trans_lock);

        return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (root->ref_cows && root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);

                /*
                 * see below for in_trans_setup usage rules
                 * we have the reloc mutex held now, so there
                 * is only one writer in this function
                 */
                root->in_trans_setup = 1;

                /* make sure readers find in_trans_setup before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&root->fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid) {
                        spin_unlock(&root->fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                           (unsigned long)root->root_key.objectid,
                           BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&root->fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky.  We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c.  The solution used here is to flag ourselves
                 * with root->in_trans_setup.  When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock.  smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_wmb();
                root->in_trans_setup = 0;
        }
        return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (!root->ref_cows)
                return 0;

        /*
         * see record_root_in_trans for comments about in_trans_setup usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !root->in_trans_setup)
                return 0;

        mutex_lock(&root->fs_info->reloc_mutex);
        record_root_in_trans(trans, root);
        mutex_unlock(&root->fs_info->reloc_mutex);

        return 0;
}
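
/*
 * A minimal sketch (not from the original sources) of how the
 * in_trans_setup barriers above pair up between the writer,
 * record_root_in_trans(), and the lockless reader,
 * btrfs_record_root_in_trans():
 *
 *      writer (reloc_mutex held)        reader (fast path)
 *      -------------------------        ------------------
 *      root->in_trans_setup = 1;
 *      smp_wmb();
 *      root->last_trans = transid;      smp_rmb();
 *      btrfs_init_reloc_root(...);      if (root->last_trans == transid &&
 *      smp_wmb();                           !root->in_trans_setup)
 *      root->in_trans_setup = 0;                return 0;
 *
 * A reader that observes in_trans_setup == 0 together with a matching
 * last_trans is guaranteed the reloc root setup has completed, so it
 * may skip the reloc_mutex entirely.
 */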

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&root->fs_info->trans_lock);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                atomic_inc(&cur_trans->use_count);
                spin_unlock(&root->fs_info->trans_lock);

                wait_event(root->fs_info->transaction_wait,
                           !cur_trans->blocked);
                put_transaction(cur_trans);
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }
}

enum btrfs_trans_type {
        TRANS_START,
        TRANS_JOIN,
        TRANS_USERSPACE,
        TRANS_JOIN_NOLOCK,
};

static int may_wait_transaction(struct btrfs_root *root, int type)
{
        if (root->fs_info->log_root_recovering)
                return 0;

        if (type == TRANS_USERSPACE)
                return 1;

        if (type == TRANS_START &&
            !atomic_read(&root->fs_info->open_ioctl_trans))
                return 1;

        return 0;
}

static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
                                                    u64 num_items, int type)
{
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        int ret;

        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
                return ERR_PTR(-EROFS);

        if (current->journal_info) {
                WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
                h = current->journal_info;
                h->use_count++;
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items > 0 && root != root->fs_info->chunk_root) {
                num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
                ret = btrfs_block_rsv_add(root,
                                          &root->fs_info->trans_block_rsv,
                                          num_bytes);
                if (ret)
                        return ERR_PTR(ret);
        }
again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h)
                return ERR_PTR(-ENOMEM);

        if (may_wait_transaction(root, type))
                wait_current_trans(root);

        do {
                ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
                if (ret == -EBUSY)
                        wait_current_trans(root);
        } while (ret == -EBUSY);

        if (ret < 0) {
                kmem_cache_free(btrfs_trans_handle_cachep, h);
                return ERR_PTR(ret);
        }

        cur_trans = root->fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->blocks_used = 0;
        h->bytes_reserved = 0;
        h->delayed_ref_updates = 0;
        h->use_count = 1;
        h->adding_csums = 0;
        h->block_rsv = NULL;
        h->orig_rsv = NULL;
        h->aborted = 0;

        smp_mb();
        if (cur_trans->blocked && may_wait_transaction(root, type)) {
                btrfs_commit_transaction(h, root);
                goto again;
        }

        if (num_bytes) {
                trace_btrfs_space_reservation(root->fs_info, "transaction",
                                              h->transid, num_bytes, 1);
                h->block_rsv = &root->fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
        }

got_it:
        btrfs_record_root_in_trans(h, root);

        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_items)
{
        return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_USERSPACE);
}
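
/*
 * A minimal usage sketch for the handle API above; the helper name
 * example_update_item() is hypothetical.  One "item" of metadata space
 * is reserved up front, and the handle is dropped when the
 * modification is done:
 *
 *      static int example_update_item(struct btrfs_root *root)
 *      {
 *              struct btrfs_trans_handle *trans;
 *
 *              trans = btrfs_start_transaction(root, 1);
 *              if (IS_ERR(trans))
 *                      return PTR_ERR(trans);
 *
 *              (... modify tree items under this handle ...)
 *
 *              return btrfs_end_transaction(trans, root);
 *      }
 */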

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->commit_done);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret;

        ret = 0;
        if (transid) {
                if (transid <= root->fs_info->last_trans_committed)
                        goto out;

                /* find specified transaction */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry(t, &root->fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                break;
                        }
                        if (t->transid > transid)
                                break;
                }
                spin_unlock(&root->fs_info->trans_lock);
                ret = -EINVAL;
                if (!cur_trans)
                        goto out;  /* bad transid */
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry_reverse(t, &root->fs_info->trans_list,
                                            list) {
                        if (t->in_commit) {
                                if (t->commit_done)
                                        break;
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(root, cur_trans);

        put_transaction(cur_trans);
        ret = 0;
out:
        return ret;
}
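
/*
 * A short caller sketch for btrfs_wait_for_commit(), modeled on the
 * wait-sync style of use (the surrounding code is hypothetical).
 * A transid of 0 means "the newest transaction that is committing":
 *
 *      ret = btrfs_wait_for_commit(root, 0);
 *
 * Passing a specific transid returns 0 immediately if it has already
 * committed, waits if it is in flight, and returns -EINVAL if no such
 * transaction exists.
 */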

void btrfs_throttle(struct btrfs_root *root)
{
        if (!atomic_read(&root->fs_info->open_ioctl_trans))
                wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        int ret;

        ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
        return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        int updates;
        int err;

        smp_mb();
        if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
                return 1;

        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates) {
                err = btrfs_run_delayed_refs(trans, root, updates);
                if (err) /* Error code will also eval true */
                        return err;
        }

        return should_end_transaction(trans, root);
}

static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, int throttle, int lock)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *info = root->fs_info;
        int count = 0;
        int err = 0;

        if (--trans->use_count) {
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        while (count < 2) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
                if (cur &&
                    trans->transaction->delayed_refs.num_heads_ready > 64) {
                        trans->delayed_ref_updates = 0;
                        btrfs_run_delayed_refs(trans, root, cur);
                } else {
                        break;
                }
                count++;
        }
        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;

        if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
            should_end_transaction(trans, root)) {
                trans->transaction->blocked = 1;
                smp_wmb();
        }

        if (lock && cur_trans->blocked && !cur_trans->in_commit) {
                if (throttle) {
                        /*
                         * We may race with somebody else here so end up having
                         * to call end_transaction on ourselves again, so inc
                         * our use_count.
                         */
                        trans->use_count++;
                        return btrfs_commit_transaction(trans, root);
                } else {
                        wake_up_process(info->transaction_kthread);
                }
        }

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);

        smp_mb();
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        if (throttle)
                btrfs_run_delayed_iputs(root);

        if (trans->aborted ||
            root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                err = -EIO;
        }

        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        int ret;

        ret = __btrfs_end_transaction(trans, root, 0, 1);
        if (ret)
                return ret;
        return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        int ret;

        ret = __btrfs_end_transaction(trans, root, 1, 1);
        if (ret)
                return ret;
        return 0;
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        int ret;

        ret = __btrfs_end_transaction(trans, root, 0, 0);
        if (ret)
                return ret;
        return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1, 1);
}
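
/*
 * A sketch of the long-running-operation pattern that
 * btrfs_should_end_transaction() serves (the loop body is
 * hypothetical): large jobs periodically drop their handle and start a
 * fresh one so that a pending commit is never held off indefinitely:
 *
 *      while (have_more_work) {
 *              (... do one bounded chunk of work under trans ...)
 *              if (btrfs_should_end_transaction(trans, root)) {
 *                      btrfs_end_transaction(trans, root);
 *                      trans = btrfs_start_transaction(root, num_items);
 *                      if (IS_ERR(trans))
 *                              return PTR_ERR(trans);
 *              }
 *      }
 */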

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark)) {
                convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                   mark, GFP_NOFS);
                err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
                              struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT)) {
                clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                  GFP_NOFS);
                err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int ret2;

        ret = btrfs_write_marked_extents(root, dirty_pages, mark);
        ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

        if (ret)
                return ret;
        if (ret2)
                return ret2;
        return 0;
}
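
/*
 * A sketch of the mark lifecycle implemented by the three helpers
 * above, assuming the tree-log style of caller: writers set 'mark'
 * bits (EXTENT_DIRTY or EXTENT_NEW) as btree blocks are dirtied, the
 * write pass converts 'mark' to EXTENT_NEED_WAIT while starting
 * writeback, and the wait pass clears EXTENT_NEED_WAIT once the pages
 * are stable:
 *
 *      ret = btrfs_write_and_wait_marked_extents(log_root,
 *                                                &log_dirty_pages,
 *                                                EXTENT_DIRTY);
 *
 * Here log_root and log_dirty_pages stand in for whichever root and
 * extent_io tree the caller tracks its dirty blocks with.
 */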

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        if (!trans || !trans->transaction) {
                struct inode *btree_inode;
                btree_inode = root->fs_info->btree_inode;
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
                                           &trans->transaction->dirty_pages,
                                           EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        return ret;

                old_root_used = btrfs_root_used(&root->root_item);
                ret = btrfs_write_dirty_block_groups(trans, root);
                if (ret)
                        return ret;
        }

        if (root != root->fs_info->extent_root)
                switch_commit_root(root);

        return 0;
}
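
/*
 * A worked trace (values invented) of why the loop above may run more
 * than once for the extent allocation tree: writing its root item can
 * itself allocate btree blocks, moving root->node again.
 *
 *      pass 1: root item records bytenr A, node now at B -> update
 *      pass 2: root item records bytenr B, node now at C -> update
 *      pass 3: root item records bytenr C, node still at C and
 *              bytes_used unchanged                      -> done
 */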

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        if (ret)
                return ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        ret = btrfs_run_dev_stats(trans, root->fs_info);
        BUG_ON(ret);

        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);

                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
        }

        down_write(&fs_info->extent_commit_sem);
        switch_commit_root(fs_info->extent_root);
        up_write(&fs_info->extent_commit_sem);

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
        spin_lock(&root->fs_info->trans_lock);
        list_add(&root->root_list, &root->fs_info->dead_roots);
        spin_unlock(&root->fs_info->trans_lock);
        return 0;
}

/*
 * update all the dirty fs tree roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        struct btrfs_root *gang[8];
        struct btrfs_fs_info *fs_info = root->fs_info;
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        root->force_cow = 0;
                        smp_wmb();

                        if (root->commit_root != root->node) {
                                mutex_lock(&root->fs_commit_mutex);
                                switch_commit_root(root);
                                btrfs_unpin_free_ino(root);
                                mutex_unlock(&root->fs_commit_mutex);

                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}
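
/*
 * A small sketch of the radix tree tag lifecycle that drives the loop
 * above: a root is tagged when it first joins a transaction and
 * untagged here as it is committed.
 *
 *      join time (record_root_in_trans):
 *              radix_tree_tag_set(&fs_info->fs_roots_radix,
 *                                 objectid, BTRFS_ROOT_TRANS_TAG);
 *
 *      commit time (commit_fs_roots):
 *              radix_tree_gang_lookup_tag(..., BTRFS_ROOT_TRANS_TAG);
 *              radix_tree_tag_clear(..., BTRFS_ROOT_TRANS_TAG);
 */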

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;
        unsigned long nr;

        if (xchg(&root->defrag_running, 1))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root, cacheonly);

                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root, nr);
                cond_resched();

                if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct btrfs_root *parent_root;
        struct btrfs_block_rsv *rsv;
        struct inode *parent_inode;
        struct dentry *parent;
        struct dentry *dentry;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        int ret;
        u64 to_reserve = 0;
        u64 index = 0;
        u64 objectid;
        u64 root_flags;

        rsv = trans->block_rsv;

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                ret = pending->error = -ENOMEM;
                goto fail;
        }

        ret = btrfs_find_free_objectid(tree_root, &objectid);
        if (ret) {
                pending->error = ret;
                goto fail;
        }

        btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

        if (to_reserve > 0) {
                ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
                                                  to_reserve);
                if (ret) {
                        pending->error = ret;
                        goto fail;
                }
        }

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_ROOT_ITEM_KEY;

        trans->block_rsv = &pending->block_rsv;

        dentry = pending->dentry;
        parent = dget_parent(dentry);
        parent_inode = parent->d_inode;
        parent_root = BTRFS_I(parent_inode)->root;
        record_root_in_trans(trans, parent_root);

        /*
         * insert the directory item
         */
        ret = btrfs_set_inode_index(parent_inode, &index);
        BUG_ON(ret); /* -ENOMEM */
        ret = btrfs_insert_dir_item(trans, parent_root,
                                dentry->d_name.name, dentry->d_name.len,
                                parent_inode, &key,
                                BTRFS_FT_DIR, index);
        if (ret == -EEXIST) {
                pending->error = -EEXIST;
                dput(parent);
                goto fail;
        } else if (ret) {
                goto abort_trans_dput;
        }

        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                         dentry->d_name.len * 2);
        ret = btrfs_update_inode(trans, parent_root, parent_inode);
        if (ret)
                goto abort_trans_dput;

        /*
         * pull in the delayed directory update
         * and the delayed inode item
         * otherwise we corrupt the FS during
         * snapshot
         */
        ret = btrfs_run_delayed_items(trans, root);
        if (ret) { /* Transaction aborted */
                dput(parent);
                goto fail;
        }

        record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
        btrfs_check_and_init_root_item(new_root_item);

        root_flags = btrfs_root_flags(new_root_item);
        if (pending->readonly)
                root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
        else
                root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
        btrfs_set_root_flags(new_root_item, root_flags);

        old = btrfs_lock_root_node(root);
        ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
        if (ret) {
                btrfs_tree_unlock(old);
                free_extent_buffer(old);
                goto abort_trans_dput;
        }

        btrfs_set_lock_blocking(old);

        ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
        /* clean up in any case */
        btrfs_tree_unlock(old);
        free_extent_buffer(old);
        if (ret)
                goto abort_trans_dput;

        /* see comments in should_cow_block() */
        root->force_cow = 1;
        smp_wmb();

        btrfs_set_root_node(new_root_item, tmp);
        /* record when the snapshot was created in key.offset */
        key.offset = trans->transid;
        ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret)
                goto abort_trans_dput;

        /*
         * insert root back/forward references
         */
        ret = btrfs_add_root_ref(trans, tree_root, objectid,
                                 parent_root->root_key.objectid,
                                 btrfs_ino(parent_inode), index,
                                 dentry->d_name.name, dentry->d_name.len);
        dput(parent);
        if (ret)
                goto fail;

        key.offset = (u64)-1;
        pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
        if (IS_ERR(pending->snap)) {
                ret = PTR_ERR(pending->snap);
                goto abort_trans;
        }

        ret = btrfs_reloc_post_snapshot(trans, pending);
        if (ret)
                goto abort_trans;
        ret = 0;
fail:
        kfree(new_root_item);
        trans->block_rsv = rsv;
        btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
        return ret;

abort_trans_dput:
        dput(parent);
abort_trans:
        btrfs_abort_transaction(trans, root, ret);
        goto fail;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;

        list_for_each_entry(pending, head, list)
                create_pending_snapshot(trans, fs_info, pending);
        return 0;
}
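
/*
 * A minimal sketch (modeled on the snapshot ioctl path; details
 * abbreviated and hypothetical) of how a snapshot request reaches the
 * list that create_pending_snapshots() walks:
 *
 *      struct btrfs_pending_snapshot *pending;
 *
 *      pending = kzalloc(sizeof(*pending), GFP_NOFS);
 *      if (!pending)
 *              return -ENOMEM;
 *      pending->root = root;           source subvolume
 *      pending->dentry = dentry;       where the snapshot will appear
 *      pending->readonly = readonly;
 *      list_add(&pending->list,
 *               &trans->transaction->pending_snapshots);
 *
 * The snapshot itself is then materialized during the next commit.
 */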

static void update_super_roots(struct btrfs_root *root)
{
        struct btrfs_root_item *root_item;
        struct btrfs_super_block *super;

        super = root->fs_info->super_copy;

        root_item = &root->fs_info->chunk_root->root_item;
        super->chunk_root = root_item->bytenr;
        super->chunk_root_generation = root_item->generation;
        super->chunk_root_level = root_item->level;

        root_item = &root->fs_info->tree_root->root_item;
        super->root = root_item->bytenr;
        super->generation = root_item->generation;
        super->root_level = root_item->level;
        if (btrfs_test_opt(root, SPACE_CACHE))
                super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
        int ret = 0;
        spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->in_commit;
        spin_unlock(&info->trans_lock);
        return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
        int ret = 0;
        spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->blocked;
        spin_unlock(&info->trans_lock);
        return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
                                            struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
                                         struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_wait,
                   trans->commit_done || (trans->in_commit && !trans->blocked));
}
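
/*
 * How the two wait helpers above line up with the transaction state
 * sequence documented before btrfs_commit_transaction() below:
 *
 *      in_commit = 1, blocked = 1  wakes wait_current_trans_commit_start()
 *      blocked = 0                 wakes the _and_unblock() variant
 *      commit_done = 1             also satisfies the _and_unblock() variant
 */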

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
        struct btrfs_trans_handle *newtrans;
        struct btrfs_root *root;
        struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
        struct btrfs_async_commit *ac =
                container_of(work, struct btrfs_async_commit, work.work);

        btrfs_commit_transaction(ac->newtrans, ac->root);
        kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   int wait_for_unblock)
{
        struct btrfs_async_commit *ac;
        struct btrfs_transaction *cur_trans;

        ac = kmalloc(sizeof(*ac), GFP_NOFS);
        if (!ac)
                return -ENOMEM;

        INIT_DELAYED_WORK(&ac->work, do_async_commit);
        ac->root = root;
        ac->newtrans = btrfs_join_transaction(root);
        if (IS_ERR(ac->newtrans)) {
                int err = PTR_ERR(ac->newtrans);
                kfree(ac);
                return err;
        }

        /* take transaction reference */
        cur_trans = trans->transaction;
        atomic_inc(&cur_trans->use_count);

        btrfs_end_transaction(trans, root);
        schedule_delayed_work(&ac->work, 0);

        /* wait for transaction to start and unblock */
        if (wait_for_unblock)
                wait_current_trans_commit_start_and_unblock(root, cur_trans);
        else
                wait_current_trans_commit_start(root, cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        put_transaction(cur_trans);
        return 0;
}
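
/*
 * A minimal caller sketch for the async commit above (caller shape is
 * hypothetical).  The handle is consumed either way; with
 * wait_for_unblock set, the caller resumes as soon as new transactions
 * may start rather than after the commit fully finishes:
 *
 *      trans = btrfs_join_transaction(root);
 *      if (IS_ERR(trans))
 *              return PTR_ERR(trans);
 *      ret = btrfs_commit_transaction_async(trans, root, 1);
 */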

static void cleanup_transaction(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, int err)
{
        struct btrfs_transaction *cur_trans = trans->transaction;

        WARN_ON(trans->use_count > 1);

        btrfs_abort_transaction(trans, root, err);

        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
        if (cur_trans == root->fs_info->running_transaction) {
                root->fs_info->running_transaction = NULL;
                root->fs_info->trans_no_join = 0;
        }
        spin_unlock(&root->fs_info->trans_lock);

        btrfs_cleanup_one_transaction(trans->transaction, root);

        put_transaction(cur_trans);
        put_transaction(cur_trans);

        trace_btrfs_transaction_commit(root);

        btrfs_scrub_continue(root);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

/*
 * btrfs_transaction state sequence:
 *    in_commit = 0, blocked = 0  (initial)
 *    in_commit = 1, blocked = 1
 *    blocked = 0
 *    commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        unsigned long joined = 0;
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_transaction *prev_trans = NULL;
        DEFINE_WAIT(wait);
        int ret = -EIO;
        int should_grow = 0;
        unsigned long now = get_seconds();
        int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

        btrfs_run_ordered_operations(root, 0);

        if (cur_trans->aborted)
                goto cleanup_transaction;

        /* make a pass through all the delayed refs we have so far;
         * any running procs may add more while we are here
         */
        ret = btrfs_run_delayed_refs(trans, root, 0);
        if (ret)
                goto cleanup_transaction;

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;

        cur_trans = trans->transaction;

        /*
         * set the flushing flag so procs in this transaction have to
         * start sending their work down.
         */
        cur_trans->delayed_refs.flushing = 1;

        ret = btrfs_run_delayed_refs(trans, root, 0);
        if (ret)
                goto cleanup_transaction;

        spin_lock(&cur_trans->commit_lock);
        if (cur_trans->in_commit) {
                spin_unlock(&cur_trans->commit_lock);
                atomic_inc(&cur_trans->use_count);
                ret = btrfs_end_transaction(trans, root);

                wait_for_commit(root, cur_trans);

                put_transaction(cur_trans);

                return ret;
        }

        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
        spin_unlock(&cur_trans->commit_lock);
        wake_up(&root->fs_info->transaction_blocked_wait);

        spin_lock(&root->fs_info->trans_lock);
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        atomic_inc(&prev_trans->use_count);
                        spin_unlock(&root->fs_info->trans_lock);

                        wait_for_commit(root, prev_trans);

                        put_transaction(prev_trans);
                } else {
                        spin_unlock(&root->fs_info->trans_lock);
                }
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }

        if (!btrfs_test_opt(root, SSD) &&
            (now < cur_trans->start_time || now - cur_trans->start_time < 1))
                should_grow = 1;

        do {
                int snap_pending = 0;

                joined = cur_trans->num_joined;
                if (!list_empty(&trans->transaction->pending_snapshots))
                        snap_pending = 1;

                WARN_ON(cur_trans != trans->transaction);

                if (flush_on_commit || snap_pending) {
                        btrfs_start_delalloc_inodes(root, 1);
                        btrfs_wait_ordered_extents(root, 0, 1);
                }

                ret = btrfs_run_delayed_items(trans, root);
                if (ret)
                        goto cleanup_transaction;

                /*
                 * rename doesn't use btrfs_join_transaction, so once we
                 * set the transaction to blocked above, we aren't going
                 * to get any new ordered operations.  We can safely run
                 * them here and know for sure that nothing new will be
                 * added to the list
                 */
                btrfs_run_ordered_operations(root, 1);

                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                if (atomic_read(&cur_trans->num_writers) > 1)
                        schedule_timeout(MAX_SCHEDULE_TIMEOUT);
                else if (should_grow)
                        schedule_timeout(1);

                finish_wait(&cur_trans->writer_wait, &wait);
        } while (atomic_read(&cur_trans->num_writers) > 1 ||
                 (should_grow && cur_trans->num_joined != joined));

        /*
         * Ok now we need to make sure to block out any other joins while we
         * commit the transaction.  We could have started a join before setting
         * no_join so make sure to wait for num_writers to == 1 again.
         */
        spin_lock(&root->fs_info->trans_lock);
        root->fs_info->trans_no_join = 1;
        spin_unlock(&root->fs_info->trans_lock);
        wait_event(cur_trans->writer_wait,
                   atomic_read(&cur_trans->num_writers) == 1);

        /*
         * the reloc mutex makes sure that we stop
         * the balancing code from coming in and moving
         * extents around in the middle of the commit
         */
        mutex_lock(&root->fs_info->reloc_mutex);

        ret = btrfs_run_delayed_items(trans, root);
        if (ret) {
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        ret = create_pending_snapshots(trans, root->fs_info);
        if (ret) {
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret) {
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        /*
         * make sure none of the code above managed to slip in a
         * delayed item
         */
        btrfs_assert_delayed_root_empty(root);

        WARN_ON(cur_trans != trans->transaction);

        btrfs_scrub_pause(root);
        /* commit_cowonly_roots is responsible for getting the
         * various roots consistent with each other.  Every pointer
         * in the tree of tree roots has to point to the most up to date
         * root for every subvolume and other tree.  So, we have to keep
         * the tree logging code from jumping in and changing any
         * of the trees.
         *
         * At this point in the commit, there can't be any tree-log
         * writers, but a little lower down we unblock new transaction
         * joins and let new people in.  By holding the tree_log_mutex
         * from now until after the super is written, we avoid races
         * with the tree-log code.
         */
        mutex_lock(&root->fs_info->tree_log_mutex);

        ret = commit_fs_roots(trans, root);
        if (ret) {
                mutex_unlock(&root->fs_info->tree_log_mutex);
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        /* commit_fs_roots gets rid of all the tree log roots, it is now
         * safe to free the root of tree log roots
         */
        btrfs_free_log_root_tree(trans, root->fs_info);

        ret = commit_cowonly_roots(trans, root);
        if (ret) {
                mutex_unlock(&root->fs_info->tree_log_mutex);
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        btrfs_prepare_extent_commit(trans, root);

        cur_trans = root->fs_info->running_transaction;

        btrfs_set_root_node(&root->fs_info->tree_root->root_item,
                            root->fs_info->tree_root->node);
        switch_commit_root(root->fs_info->tree_root);

        btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
                            root->fs_info->chunk_root->node);
        switch_commit_root(root->fs_info->chunk_root);

        update_super_roots(root);

        if (!root->fs_info->log_root_recovering) {
                btrfs_set_super_log_root(root->fs_info->super_copy, 0);
                btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
        }

        memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
               sizeof(*root->fs_info->super_copy));

        trans->transaction->blocked = 0;
        spin_lock(&root->fs_info->trans_lock);
        root->fs_info->running_transaction = NULL;
        root->fs_info->trans_no_join = 0;
        spin_unlock(&root->fs_info->trans_lock);
        mutex_unlock(&root->fs_info->reloc_mutex);

        wake_up(&root->fs_info->transaction_wait);

        ret = btrfs_write_and_wait_transaction(trans, root);
        if (ret) {
                btrfs_error(root->fs_info, ret,
                            "Error while writing out transaction.");
                mutex_unlock(&root->fs_info->tree_log_mutex);
                goto cleanup_transaction;
        }

        ret = write_ctree_super(trans, root, 0);
        if (ret) {
                mutex_unlock(&root->fs_info->tree_log_mutex);
                goto cleanup_transaction;
        }

        /*
         * the super is written, we can safely allow the tree-loggers
         * to go about their business
         */
        mutex_unlock(&root->fs_info->tree_log_mutex);

        btrfs_finish_extent_commit(trans, root);

        cur_trans->commit_done = 1;

        root->fs_info->last_trans_committed = cur_trans->transid;

        wake_up(&cur_trans->commit_wait);

        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
        spin_unlock(&root->fs_info->trans_lock);

        put_transaction(cur_trans);
        put_transaction(cur_trans);

        trace_btrfs_transaction_commit(root);

        btrfs_scrub_continue(root);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (current != root->fs_info->transaction_kthread)
                btrfs_run_delayed_iputs(root);

        return ret;

cleanup_transaction:
        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
        btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
//      WARN_ON(1);
        if (current->journal_info == trans)
                current->journal_info = NULL;
        cleanup_transaction(trans, root, ret);

        return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
        LIST_HEAD(list);
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
        list_splice_init(&fs_info->dead_roots, &list);
        spin_unlock(&fs_info->trans_lock);

        while (!list_empty(&list)) {
                int ret;

                root = list_entry(list.next, struct btrfs_root, root_list);
                list_del(&root->root_list);

                btrfs_kill_all_delayed_nodes(root);

                if (btrfs_header_backref_rev(root->node) <
                    BTRFS_MIXED_BACKREF_REV)
                        ret = btrfs_drop_snapshot(root, NULL, 0, 0);
                else
                        ret = btrfs_drop_snapshot(root, NULL, 1, 0);
                BUG_ON(ret < 0);
        }
        return 0;
}
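
/*
 * A short sketch of the expected caller, assuming the cleaner kthread
 * shape from disk-io.c: dead roots queued by btrfs_add_dead_root() are
 * reaped periodically under the cleaner mutex.
 *
 *      if (mutex_trylock(&root->fs_info->cleaner_mutex)) {
 *              btrfs_clean_old_snapshots(root);
 *              mutex_unlock(&root->fs_info->cleaner_mutex);
 *      }
 */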