git.openfabrics.org - ~shefty/rdma-dev.git/blob - fs/btrfs/inode.c
88f9df7bfdaee77dcc65b223b8afbc0b61ee35d2
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/smp_lock.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mpage.h>
32 #include <linux/swap.h>
33 #include <linux/writeback.h>
34 #include <linux/statfs.h>
35 #include <linux/compat.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/xattr.h>
38 #include <linux/posix_acl.h>
39 #include <linux/falloc.h>
40 #include "compat.h"
41 #include "ctree.h"
42 #include "disk-io.h"
43 #include "transaction.h"
44 #include "btrfs_inode.h"
45 #include "ioctl.h"
46 #include "print-tree.h"
47 #include "volumes.h"
48 #include "ordered-data.h"
49 #include "xattr.h"
50 #include "tree-log.h"
51 #include "compression.h"
52 #include "locking.h"
53
54 struct btrfs_iget_args {
55         u64 ino;
56         struct btrfs_root *root;
57 };
58
59 static struct inode_operations btrfs_dir_inode_operations;
60 static struct inode_operations btrfs_symlink_inode_operations;
61 static struct inode_operations btrfs_dir_ro_inode_operations;
62 static struct inode_operations btrfs_special_inode_operations;
63 static struct inode_operations btrfs_file_inode_operations;
64 static struct address_space_operations btrfs_aops;
65 static struct address_space_operations btrfs_symlink_aops;
66 static struct file_operations btrfs_dir_file_operations;
67 static struct extent_io_ops btrfs_extent_io_ops;
68
69 static struct kmem_cache *btrfs_inode_cachep;
70 struct kmem_cache *btrfs_trans_handle_cachep;
71 struct kmem_cache *btrfs_transaction_cachep;
72 struct kmem_cache *btrfs_path_cachep;
73
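/*
 * map the S_IFMT bits of an inode mode to the directory entry type
 * stored on disk.  Shifting the mode right by S_SHIFT (12) turns the
 * file type bits into a small table index, e.g. S_IFREG (0100000
 * octal) >> 12 == 8, which selects BTRFS_FT_REG_FILE below.
 */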
74 #define S_SHIFT 12
75 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
76         [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
77         [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
78         [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
79         [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
80         [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
81         [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
82         [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
83 };
84
85 static void btrfs_truncate(struct inode *inode);
86 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
87 static noinline int cow_file_range(struct inode *inode,
88                                    struct page *locked_page,
89                                    u64 start, u64 end, int *page_started,
90                                    unsigned long *nr_written, int unlock);
91
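/*
 * initialize the security related bits of a new inode: inherit the
 * ACLs from the parent directory and then set up the security xattr.
 * Any failure from either step is passed straight back to the caller.
 */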
92 static int btrfs_init_inode_security(struct inode *inode,  struct inode *dir)
93 {
94         int err;
95
96         err = btrfs_init_acl(inode, dir);
97         if (!err)
98                 err = btrfs_xattr_security_init(inode, dir);
99         return err;
100 }
101
102 /*
103  * this does all the hard work for inserting an inline extent into
104  * the btree.  The caller should have done a btrfs_drop_extents so that
105  * no overlapping inline items exist in the btree
106  */
107 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
108                                 struct btrfs_root *root, struct inode *inode,
109                                 u64 start, size_t size, size_t compressed_size,
110                                 struct page **compressed_pages)
111 {
112         struct btrfs_key key;
113         struct btrfs_path *path;
114         struct extent_buffer *leaf;
115         struct page *page = NULL;
116         char *kaddr;
117         unsigned long ptr;
118         struct btrfs_file_extent_item *ei;
119         int err = 0;
120         int ret;
121         size_t cur_size = size;
122         size_t datasize;
123         unsigned long offset;
124         int use_compress = 0;
125
126         if (compressed_size && compressed_pages) {
127                 use_compress = 1;
128                 cur_size = compressed_size;
129         }
130
131         path = btrfs_alloc_path();
132         if (!path)
133                 return -ENOMEM;
134
135         path->leave_spinning = 1;
136         btrfs_set_trans_block_group(trans, inode);
137
138         key.objectid = inode->i_ino;
139         key.offset = start;
140         btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
141         datasize = btrfs_file_extent_calc_inline_size(cur_size);
142
143         inode_add_bytes(inode, size);
144         ret = btrfs_insert_empty_item(trans, root, path, &key,
145                                       datasize);
146         BUG_ON(ret);
147         if (ret) {
148                 err = ret;
149                 goto fail;
150         }
151         leaf = path->nodes[0];
152         ei = btrfs_item_ptr(leaf, path->slots[0],
153                             struct btrfs_file_extent_item);
154         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
155         btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
156         btrfs_set_file_extent_encryption(leaf, ei, 0);
157         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
158         btrfs_set_file_extent_ram_bytes(leaf, ei, size);
159         ptr = btrfs_file_extent_inline_start(ei);
160
161         if (use_compress) {
162                 struct page *cpage;
163                 int i = 0;
164                 while (compressed_size > 0) {
165                         cpage = compressed_pages[i];
166                         cur_size = min_t(unsigned long, compressed_size,
167                                        PAGE_CACHE_SIZE);
168
169                         kaddr = kmap_atomic(cpage, KM_USER0);
170                         write_extent_buffer(leaf, kaddr, ptr, cur_size);
171                         kunmap_atomic(kaddr, KM_USER0);
172
173                         i++;
174                         ptr += cur_size;
175                         compressed_size -= cur_size;
176                 }
177                 btrfs_set_file_extent_compression(leaf, ei,
178                                                   BTRFS_COMPRESS_ZLIB);
179         } else {
180                 page = find_get_page(inode->i_mapping,
181                                      start >> PAGE_CACHE_SHIFT);
182                 btrfs_set_file_extent_compression(leaf, ei, 0);
183                 kaddr = kmap_atomic(page, KM_USER0);
184                 offset = start & (PAGE_CACHE_SIZE - 1);
185                 write_extent_buffer(leaf, kaddr + offset, ptr, size);
186                 kunmap_atomic(kaddr, KM_USER0);
187                 page_cache_release(page);
188         }
189         btrfs_mark_buffer_dirty(leaf);
190         btrfs_free_path(path);
191
192         BTRFS_I(inode)->disk_i_size = inode->i_size;
193         btrfs_update_inode(trans, root, inode);
194         return 0;
195 fail:
196         btrfs_free_path(path);
197         return err;
198 }
199
200
201 /*
202  * conditionally insert an inline extent into the file.  This
203  * does the checks required to make sure the data is small enough
204  * to fit as an inline extent.
205  */
206 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
207                                  struct btrfs_root *root,
208                                  struct inode *inode, u64 start, u64 end,
209                                  size_t compressed_size,
210                                  struct page **compressed_pages)
211 {
212         u64 isize = i_size_read(inode);
213         u64 actual_end = min(end + 1, isize);
214         u64 inline_len = actual_end - start;
215         u64 aligned_end = (end + root->sectorsize - 1) &
216                         ~((u64)root->sectorsize - 1);
217         u64 hint_byte;
218         u64 data_len = inline_len;
219         int ret;
220
221         if (compressed_size)
222                 data_len = compressed_size;
223
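        /*
         * refuse to inline unless the data starts at offset zero, fits
         * entirely inside the first page, fits in a single leaf item,
         * actually covers the end of the file, and is within the
         * max_inline mount option.  Uncompressed data that already ends
         * on a sector boundary gains nothing from being inlined either.
         */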
224         if (start > 0 ||
225             actual_end >= PAGE_CACHE_SIZE ||
226             data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
227             (!compressed_size &&
228             (actual_end & (root->sectorsize - 1)) == 0) ||
229             end + 1 < isize ||
230             data_len > root->fs_info->max_inline) {
231                 return 1;
232         }
233
234         ret = btrfs_drop_extents(trans, root, inode, start,
235                                  aligned_end, aligned_end, start,
236                                  &hint_byte, 1);
237         BUG_ON(ret);
238
239         if (isize > actual_end)
240                 inline_len = min_t(u64, isize, actual_end);
241         ret = insert_inline_extent(trans, root, inode, start,
242                                    inline_len, compressed_size,
243                                    compressed_pages);
244         BUG_ON(ret);
245         btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
246         return 0;
247 }
248
249 struct async_extent {
250         u64 start;
251         u64 ram_size;
252         u64 compressed_size;
253         struct page **pages;
254         unsigned long nr_pages;
255         struct list_head list;
256 };
257
258 struct async_cow {
259         struct inode *inode;
260         struct btrfs_root *root;
261         struct page *locked_page;
262         u64 start;
263         u64 end;
264         struct list_head extents;
265         struct btrfs_work work;
266 };
267
268 static noinline int add_async_extent(struct async_cow *cow,
269                                      u64 start, u64 ram_size,
270                                      u64 compressed_size,
271                                      struct page **pages,
272                                      unsigned long nr_pages)
273 {
274         struct async_extent *async_extent;
275
276         async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
277         async_extent->start = start;
278         async_extent->ram_size = ram_size;
279         async_extent->compressed_size = compressed_size;
280         async_extent->pages = pages;
281         async_extent->nr_pages = nr_pages;
282         list_add_tail(&async_extent->list, &cow->extents);
283         return 0;
284 }
285
286 /*
287  * we create compressed extents in two phases.  The first
288  * phase compresses a range of pages that have already been
289  * locked (both pages and state bits are locked).
290  *
291  * This is done inside an ordered work queue, and the compression
292  * is spread across many cpus.  The actual IO submission is step
293  * two, and the ordered work queue takes care of making sure that
294  * happens in the same order things were put onto the queue by
295  * writepages and friends.
296  *
297  * If this code finds it can't get good compression, it puts an
298  * entry onto the work queue to write the uncompressed bytes.  This
299  * makes sure that both compressed inodes and uncompressed inodes
300  * are written in the same order that pdflush sent them down.
301  */
302 static noinline int compress_file_range(struct inode *inode,
303                                         struct page *locked_page,
304                                         u64 start, u64 end,
305                                         struct async_cow *async_cow,
306                                         int *num_added)
307 {
308         struct btrfs_root *root = BTRFS_I(inode)->root;
309         struct btrfs_trans_handle *trans;
310         u64 num_bytes;
311         u64 orig_start;
312         u64 disk_num_bytes;
313         u64 blocksize = root->sectorsize;
314         u64 actual_end;
315         u64 isize = i_size_read(inode);
316         int ret = 0;
317         struct page **pages = NULL;
318         unsigned long nr_pages;
319         unsigned long nr_pages_ret = 0;
320         unsigned long total_compressed = 0;
321         unsigned long total_in = 0;
322         unsigned long max_compressed = 128 * 1024;
323         unsigned long max_uncompressed = 128 * 1024;
324         int i;
325         int will_compress;
326
327         orig_start = start;
328
329         actual_end = min_t(u64, isize, end + 1);
330 again:
331         will_compress = 0;
332         nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
333         nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
334
335         /*
336          * we don't want to send crud past the end of i_size through
337          * compression, that's just a waste of CPU time.  So, if the
338          * end of the file is before the start of our current
339          * requested range of bytes, we bail out to the uncompressed
340          * cleanup code that can deal with all of this.
341          *
342          * It isn't really the fastest way to fix things, but this is a
343          * very uncommon corner.
344          */
345         if (actual_end <= start)
346                 goto cleanup_and_bail_uncompressed;
347
348         total_compressed = actual_end - start;
349
350         /* we want to make sure that the amount of ram required to uncompress
351          * an extent is reasonable, so we limit the total size in ram
352          * of a compressed extent to 128k.  This is a crucial number
353          * because it also controls how easily we can spread reads across
354          * cpus for decompression.
355          *
356          * We also want to make sure the amount of IO required to do
357          * a random read is reasonably small, so we limit the size of
358          * a compressed extent to 128k.
359          */
360         total_compressed = min(total_compressed, max_uncompressed);
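        /*
         * end is the offset of the last byte in the range, so
         * end - start + 1 bytes rounded up to the block size is
         * (end - start + blocksize) & ~(blocksize - 1)
         */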
361         num_bytes = (end - start + blocksize) & ~(blocksize - 1);
362         num_bytes = max(blocksize,  num_bytes);
363         disk_num_bytes = num_bytes;
364         total_in = 0;
365         ret = 0;
366
367         /*
368          * we do compression for mount -o compress and when the
369          * inode has not been flagged as nocompress.  This flag can
370          * change at any time if we discover bad compression ratios.
371          */
372         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
373             btrfs_test_opt(root, COMPRESS)) {
374                 WARN_ON(pages);
375                 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
376
377                 ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
378                                                 total_compressed, pages,
379                                                 nr_pages, &nr_pages_ret,
380                                                 &total_in,
381                                                 &total_compressed,
382                                                 max_compressed);
383
384                 if (!ret) {
385                         unsigned long offset = total_compressed &
386                                 (PAGE_CACHE_SIZE - 1);
387                         struct page *page = pages[nr_pages_ret - 1];
388                         char *kaddr;
389
390                         /* zero the tail end of the last page, we might be
391                          * sending it down to disk
392                          */
393                         if (offset) {
394                                 kaddr = kmap_atomic(page, KM_USER0);
395                                 memset(kaddr + offset, 0,
396                                        PAGE_CACHE_SIZE - offset);
397                                 kunmap_atomic(kaddr, KM_USER0);
398                         }
399                         will_compress = 1;
400                 }
401         }
402         if (start == 0) {
403                 trans = btrfs_join_transaction(root, 1);
404                 BUG_ON(!trans);
405                 btrfs_set_trans_block_group(trans, inode);
406
407                 /* lets try to make an inline extent */
408                 if (ret || total_in < (actual_end - start)) {
409                         /* we didn't compress the entire range, try
410                          * to make an uncompressed inline extent.
411                          */
412                         ret = cow_file_range_inline(trans, root, inode,
413                                                     start, end, 0, NULL);
414                 } else {
415                         /* try making a compressed inline extent */
416                         ret = cow_file_range_inline(trans, root, inode,
417                                                     start, end,
418                                                     total_compressed, pages);
419                 }
420                 btrfs_end_transaction(trans, root);
421                 if (ret == 0) {
422                         /*
423                          * inline extent creation worked, we don't need
424                          * to create any more async work items.  Unlock
425                          * and free up our temp pages.
426                          */
427                         extent_clear_unlock_delalloc(inode,
428                                                      &BTRFS_I(inode)->io_tree,
429                                                      start, end, NULL, 1, 0,
430                                                      0, 1, 1, 1, 0);
431                         ret = 0;
432                         goto free_pages_out;
433                 }
434         }
435
436         if (will_compress) {
437                 /*
438                  * we aren't doing an inline extent, so round the compressed size
439                  * up to a block size boundary so that the allocator does sane
440                  * things
441                  */
442                 total_compressed = (total_compressed + blocksize - 1) &
443                         ~(blocksize - 1);
444
445                 /*
446                  * one last check to make sure the compression is really a
447                  * win, compare the page count read with the blocks on disk
448                  */
449                 total_in = (total_in + PAGE_CACHE_SIZE - 1) &
450                         ~(PAGE_CACHE_SIZE - 1);
451                 if (total_compressed >= total_in) {
452                         will_compress = 0;
453                 } else {
454                         disk_num_bytes = total_compressed;
455                         num_bytes = total_in;
456                 }
457         }
458         if (!will_compress && pages) {
459                 /*
460                  * the compression code ran but failed to make things smaller,
461                  * free any pages it allocated and our page pointer array
462                  */
463                 for (i = 0; i < nr_pages_ret; i++) {
464                         WARN_ON(pages[i]->mapping);
465                         page_cache_release(pages[i]);
466                 }
467                 kfree(pages);
468                 pages = NULL;
469                 total_compressed = 0;
470                 nr_pages_ret = 0;
471
472                 /* flag the file so we don't compress in the future */
473                 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
474         }
475         if (will_compress) {
476                 *num_added += 1;
477
478                 /* the async work queues will take care of doing actual
479                  * allocation on disk for these compressed pages,
480                  * and will submit them to the elevator.
481                  */
482                 add_async_extent(async_cow, start, num_bytes,
483                                  total_compressed, pages, nr_pages_ret);
484
485                 if (start + num_bytes < end && start + num_bytes < actual_end) {
486                         start += num_bytes;
487                         pages = NULL;
488                         cond_resched();
489                         goto again;
490                 }
491         } else {
492 cleanup_and_bail_uncompressed:
493                 /*
494                  * No compression, but we still need to write the pages in
495                  * the file we've been given so far.  redirty the locked
496                  * page if it corresponds to our extent and set things up
497                  * for the async work queue to run cow_file_range to do
498                  * the normal delalloc dance
499                  */
500                 if (page_offset(locked_page) >= start &&
501                     page_offset(locked_page) <= end) {
502                         __set_page_dirty_nobuffers(locked_page);
503                         /* unlocked later on in the async handlers */
504                 }
505                 add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
506                 *num_added += 1;
507         }
508
509 out:
510         return 0;
511
512 free_pages_out:
513         for (i = 0; i < nr_pages_ret; i++) {
514                 WARN_ON(pages[i]->mapping);
515                 page_cache_release(pages[i]);
516         }
517         kfree(pages);
518
519         goto out;
520 }
521
522 /*
523  * phase two of compressed writeback.  This is the ordered portion
524  * of the code, which only gets called in the order the work was
525  * queued.  We walk all the async extents created by compress_file_range
526  * and send them down to the disk.
527  */
528 static noinline int submit_compressed_extents(struct inode *inode,
529                                               struct async_cow *async_cow)
530 {
531         struct async_extent *async_extent;
532         u64 alloc_hint = 0;
533         struct btrfs_trans_handle *trans;
534         struct btrfs_key ins;
535         struct extent_map *em;
536         struct btrfs_root *root = BTRFS_I(inode)->root;
537         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
538         struct extent_io_tree *io_tree;
539         int ret;
540
541         if (list_empty(&async_cow->extents))
542                 return 0;
543
544         trans = btrfs_join_transaction(root, 1);
545
546         while (!list_empty(&async_cow->extents)) {
547                 async_extent = list_entry(async_cow->extents.next,
548                                           struct async_extent, list);
549                 list_del(&async_extent->list);
550
551                 io_tree = &BTRFS_I(inode)->io_tree;
552
553                 /* did the compression code fall back to uncompressed IO? */
554                 if (!async_extent->pages) {
555                         int page_started = 0;
556                         unsigned long nr_written = 0;
557
558                         lock_extent(io_tree, async_extent->start,
559                                     async_extent->start +
560                                     async_extent->ram_size - 1, GFP_NOFS);
561
562                         /* allocate blocks */
563                         cow_file_range(inode, async_cow->locked_page,
564                                        async_extent->start,
565                                        async_extent->start +
566                                        async_extent->ram_size - 1,
567                                        &page_started, &nr_written, 0);
568
569                         /*
570                          * if page_started, cow_file_range inserted an
571                          * inline extent and took care of all the unlocking
572                          * and IO for us.  Otherwise, we need to submit
573                          * all those pages down to the drive.
574                          */
575                         if (!page_started)
576                                 extent_write_locked_range(io_tree,
577                                                   inode, async_extent->start,
578                                                   async_extent->start +
579                                                   async_extent->ram_size - 1,
580                                                   btrfs_get_extent,
581                                                   WB_SYNC_ALL);
582                         kfree(async_extent);
583                         cond_resched();
584                         continue;
585                 }
586
587                 lock_extent(io_tree, async_extent->start,
588                             async_extent->start + async_extent->ram_size - 1,
589                             GFP_NOFS);
590                 /*
591                  * here we're doing allocation and writeback of the
592                  * compressed pages
593                  */
594                 btrfs_drop_extent_cache(inode, async_extent->start,
595                                         async_extent->start +
596                                         async_extent->ram_size - 1, 0);
597
598                 ret = btrfs_reserve_extent(trans, root,
599                                            async_extent->compressed_size,
600                                            async_extent->compressed_size,
601                                            0, alloc_hint,
602                                            (u64)-1, &ins, 1);
603                 BUG_ON(ret);
604                 em = alloc_extent_map(GFP_NOFS);
605                 em->start = async_extent->start;
606                 em->len = async_extent->ram_size;
607                 em->orig_start = em->start;
608
609                 em->block_start = ins.objectid;
610                 em->block_len = ins.offset;
611                 em->bdev = root->fs_info->fs_devices->latest_bdev;
612                 set_bit(EXTENT_FLAG_PINNED, &em->flags);
613                 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
614
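                /*
                 * insert the new mapping into the extent map tree.  If a
                 * stale overlapping mapping is still cached we get -EEXIST,
                 * so drop the cached range and retry until the insert no
                 * longer collides.
                 */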
615                 while (1) {
616                         write_lock(&em_tree->lock);
617                         ret = add_extent_mapping(em_tree, em);
618                         write_unlock(&em_tree->lock);
619                         if (ret != -EEXIST) {
620                                 free_extent_map(em);
621                                 break;
622                         }
623                         btrfs_drop_extent_cache(inode, async_extent->start,
624                                                 async_extent->start +
625                                                 async_extent->ram_size - 1, 0);
626                 }
627
628                 ret = btrfs_add_ordered_extent(inode, async_extent->start,
629                                                ins.objectid,
630                                                async_extent->ram_size,
631                                                ins.offset,
632                                                BTRFS_ORDERED_COMPRESSED);
633                 BUG_ON(ret);
634
635                 btrfs_end_transaction(trans, root);
636
637                 /*
638                  * clear dirty, set writeback and unlock the pages.
639                  */
640                 extent_clear_unlock_delalloc(inode,
641                                              &BTRFS_I(inode)->io_tree,
642                                              async_extent->start,
643                                              async_extent->start +
644                                              async_extent->ram_size - 1,
645                                              NULL, 1, 1, 0, 1, 1, 0, 0);
646
647                 ret = btrfs_submit_compressed_write(inode,
648                                     async_extent->start,
649                                     async_extent->ram_size,
650                                     ins.objectid,
651                                     ins.offset, async_extent->pages,
652                                     async_extent->nr_pages);
653
654                 BUG_ON(ret);
655                 trans = btrfs_join_transaction(root, 1);
656                 alloc_hint = ins.objectid + ins.offset;
657                 kfree(async_extent);
658                 cond_resched();
659         }
660
661         btrfs_end_transaction(trans, root);
662         return 0;
663 }
664
665 /*
666  * when extent_io.c finds a delayed allocation range in the file,
667  * the call backs end up in this code.  The basic idea is to
668  * allocate extents on disk for the range, and create ordered data structs
669  * in ram to track those extents.
670  *
671  * locked_page is the page that writepage had locked already.  We use
672  * it to make sure we don't do extra locks or unlocks.
673  *
674  * *page_started is set to one if we unlock locked_page and do everything
675  * required to start IO on it.  It may be clean and already done with
676  * IO when we return.
677  */
678 static noinline int cow_file_range(struct inode *inode,
679                                    struct page *locked_page,
680                                    u64 start, u64 end, int *page_started,
681                                    unsigned long *nr_written,
682                                    int unlock)
683 {
684         struct btrfs_root *root = BTRFS_I(inode)->root;
685         struct btrfs_trans_handle *trans;
686         u64 alloc_hint = 0;
687         u64 num_bytes;
688         unsigned long ram_size;
689         u64 disk_num_bytes;
690         u64 cur_alloc_size;
691         u64 blocksize = root->sectorsize;
692         u64 actual_end;
693         u64 isize = i_size_read(inode);
694         struct btrfs_key ins;
695         struct extent_map *em;
696         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
697         int ret = 0;
698
699         trans = btrfs_join_transaction(root, 1);
700         BUG_ON(!trans);
701         btrfs_set_trans_block_group(trans, inode);
702
703         actual_end = min_t(u64, isize, end + 1);
704
705         num_bytes = (end - start + blocksize) & ~(blocksize - 1);
706         num_bytes = max(blocksize,  num_bytes);
707         disk_num_bytes = num_bytes;
708         ret = 0;
709
710         if (start == 0) {
711                 /* lets try to make an inline extent */
712                 ret = cow_file_range_inline(trans, root, inode,
713                                             start, end, 0, NULL);
714                 if (ret == 0) {
715                         extent_clear_unlock_delalloc(inode,
716                                                      &BTRFS_I(inode)->io_tree,
717                                                      start, end, NULL, 1, 1,
718                                                      1, 1, 1, 1, 0);
719                         *nr_written = *nr_written +
720                              (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
721                         *page_started = 1;
722                         ret = 0;
723                         goto out;
724                 }
725         }
726
727         BUG_ON(disk_num_bytes >
728                btrfs_super_total_bytes(&root->fs_info->super_copy));
729
730         btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
731
732         while (disk_num_bytes > 0) {
733                 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
734                 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
735                                            root->sectorsize, 0, alloc_hint,
736                                            (u64)-1, &ins, 1);
737                 BUG_ON(ret);
738
739                 em = alloc_extent_map(GFP_NOFS);
740                 em->start = start;
741                 em->orig_start = em->start;
742
743                 ram_size = ins.offset;
744                 em->len = ins.offset;
745
746                 em->block_start = ins.objectid;
747                 em->block_len = ins.offset;
748                 em->bdev = root->fs_info->fs_devices->latest_bdev;
749                 set_bit(EXTENT_FLAG_PINNED, &em->flags);
750
751                 while (1) {
752                         write_lock(&em_tree->lock);
753                         ret = add_extent_mapping(em_tree, em);
754                         write_unlock(&em_tree->lock);
755                         if (ret != -EEXIST) {
756                                 free_extent_map(em);
757                                 break;
758                         }
759                         btrfs_drop_extent_cache(inode, start,
760                                                 start + ram_size - 1, 0);
761                 }
762
763                 cur_alloc_size = ins.offset;
764                 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
765                                                ram_size, cur_alloc_size, 0);
766                 BUG_ON(ret);
767
768                 if (root->root_key.objectid ==
769                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
770                         ret = btrfs_reloc_clone_csums(inode, start,
771                                                       cur_alloc_size);
772                         BUG_ON(ret);
773                 }
774
775                 if (disk_num_bytes < cur_alloc_size)
776                         break;
777
778                 /* we're not doing compressed IO, don't unlock the first
779                  * page (which the caller expects to stay locked), don't
780                  * clear any dirty bits and don't set any writeback bits
781                  *
782                  * Do set the Private2 bit so we know this page was properly
783                  * setup for writepage
784                  */
785                 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
786                                              start, start + ram_size - 1,
787                                              locked_page, unlock, 1,
788                                              1, 0, 0, 0, 1);
789                 disk_num_bytes -= cur_alloc_size;
790                 num_bytes -= cur_alloc_size;
791                 alloc_hint = ins.objectid + ins.offset;
792                 start += cur_alloc_size;
793         }
794 out:
795         ret = 0;
796         btrfs_end_transaction(trans, root);
797
798         return ret;
799 }
800
801 /*
802  * work queue callback to start compression on a file and pages
803  */
804 static noinline void async_cow_start(struct btrfs_work *work)
805 {
806         struct async_cow *async_cow;
807         int num_added = 0;
808         async_cow = container_of(work, struct async_cow, work);
809
810         compress_file_range(async_cow->inode, async_cow->locked_page,
811                             async_cow->start, async_cow->end, async_cow,
812                             &num_added);
813         if (num_added == 0)
814                 async_cow->inode = NULL;
815 }
816
817 /*
818  * work queue call back to submit previously compressed pages
819  */
820 static noinline void async_cow_submit(struct btrfs_work *work)
821 {
822         struct async_cow *async_cow;
823         struct btrfs_root *root;
824         unsigned long nr_pages;
825
826         async_cow = container_of(work, struct async_cow, work);
827
828         root = async_cow->root;
829         nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
830                 PAGE_CACHE_SHIFT;
831
832         atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
833
834         if (atomic_read(&root->fs_info->async_delalloc_pages) <
835             5 * 1024 * 1024 &&
836             waitqueue_active(&root->fs_info->async_submit_wait))
837                 wake_up(&root->fs_info->async_submit_wait);
838
839         if (async_cow->inode)
840                 submit_compressed_extents(async_cow->inode, async_cow);
841 }
842
843 static noinline void async_cow_free(struct btrfs_work *work)
844 {
845         struct async_cow *async_cow;
846         async_cow = container_of(work, struct async_cow, work);
847         kfree(async_cow);
848 }
849
850 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
851                                 u64 start, u64 end, int *page_started,
852                                 unsigned long *nr_written)
853 {
854         struct async_cow *async_cow;
855         struct btrfs_root *root = BTRFS_I(inode)->root;
856         unsigned long nr_pages;
857         u64 cur_end;
858         int limit = 10 * 1024 * 1024;
859
860         clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
861                          EXTENT_DELALLOC, 1, 0, NULL, GFP_NOFS);
862         while (start < end) {
863                 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
864                 async_cow->inode = inode;
865                 async_cow->root = root;
866                 async_cow->locked_page = locked_page;
867                 async_cow->start = start;
868
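                /*
                 * hand the range to the compression workers in chunks of
                 * at most 512k so one huge write doesn't tie up a single
                 * worker; inodes flagged nocompress skip compression, so
                 * they can go through in one piece.
                 */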
869                 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
870                         cur_end = end;
871                 else
872                         cur_end = min(end, start + 512 * 1024 - 1);
873
874                 async_cow->end = cur_end;
875                 INIT_LIST_HEAD(&async_cow->extents);
876
877                 async_cow->work.func = async_cow_start;
878                 async_cow->work.ordered_func = async_cow_submit;
879                 async_cow->work.ordered_free = async_cow_free;
880                 async_cow->work.flags = 0;
881
882                 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
883                         PAGE_CACHE_SHIFT;
884                 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
885
886                 btrfs_queue_worker(&root->fs_info->delalloc_workers,
887                                    &async_cow->work);
888
889                 if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
890                         wait_event(root->fs_info->async_submit_wait,
891                            (atomic_read(&root->fs_info->async_delalloc_pages) <
892                             limit));
893                 }
894
895                 while (atomic_read(&root->fs_info->async_submit_draining) &&
896                       atomic_read(&root->fs_info->async_delalloc_pages)) {
897                         wait_event(root->fs_info->async_submit_wait,
898                           (atomic_read(&root->fs_info->async_delalloc_pages) ==
899                            0));
900                 }
901
902                 *nr_written += nr_pages;
903                 start = cur_end + 1;
904         }
905         *page_started = 1;
906         return 0;
907 }
908
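/*
 * helper for the nocow path: look for checksum items covering the byte
 * range [bytenr, bytenr + num_bytes).  Returns 0 only when the lookup
 * succeeded and found nothing; otherwise any sums found are freed and
 * 1 is returned so the caller falls back to COW.
 */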
909 static noinline int csum_exist_in_range(struct btrfs_root *root,
910                                         u64 bytenr, u64 num_bytes)
911 {
912         int ret;
913         struct btrfs_ordered_sum *sums;
914         LIST_HEAD(list);
915
916         ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
917                                        bytenr + num_bytes - 1, &list);
918         if (ret == 0 && list_empty(&list))
919                 return 0;
920
921         while (!list_empty(&list)) {
922                 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
923                 list_del(&sums->list);
924                 kfree(sums);
925         }
926         return 1;
927 }
928
929 /*
930  * the nocow writeback callback.  This checks for snapshots or COW copies
931  * of the extents that exist in the file, and COWs the file as required.
932  *
933  * If no cow copies or snapshots exist, we write directly to the existing
934  * blocks on disk
935  */
936 static noinline int run_delalloc_nocow(struct inode *inode,
937                                        struct page *locked_page,
938                               u64 start, u64 end, int *page_started, int force,
939                               unsigned long *nr_written)
940 {
941         struct btrfs_root *root = BTRFS_I(inode)->root;
942         struct btrfs_trans_handle *trans;
943         struct extent_buffer *leaf;
944         struct btrfs_path *path;
945         struct btrfs_file_extent_item *fi;
946         struct btrfs_key found_key;
947         u64 cow_start;
948         u64 cur_offset;
949         u64 extent_end;
950         u64 extent_offset;
951         u64 disk_bytenr;
952         u64 num_bytes;
953         int extent_type;
954         int ret;
955         int type;
956         int nocow;
957         int check_prev = 1;
958
959         path = btrfs_alloc_path();
960         BUG_ON(!path);
961         trans = btrfs_join_transaction(root, 1);
962         BUG_ON(!trans);
963
964         cow_start = (u64)-1;
965         cur_offset = start;
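        /*
         * walk the file extent items that cover [start, end].  Extents we
         * can safely reuse in place become nocow/prealloc ordered extents;
         * anything else is accumulated from cow_start onwards and handed
         * to cow_file_range for a normal COW write.
         */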
966         while (1) {
967                 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
968                                                cur_offset, 0);
969                 BUG_ON(ret < 0);
970                 if (ret > 0 && path->slots[0] > 0 && check_prev) {
971                         leaf = path->nodes[0];
972                         btrfs_item_key_to_cpu(leaf, &found_key,
973                                               path->slots[0] - 1);
974                         if (found_key.objectid == inode->i_ino &&
975                             found_key.type == BTRFS_EXTENT_DATA_KEY)
976                                 path->slots[0]--;
977                 }
978                 check_prev = 0;
979 next_slot:
980                 leaf = path->nodes[0];
981                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
982                         ret = btrfs_next_leaf(root, path);
983                         if (ret < 0)
984                                 BUG_ON(1);
985                         if (ret > 0)
986                                 break;
987                         leaf = path->nodes[0];
988                 }
989
990                 nocow = 0;
991                 disk_bytenr = 0;
992                 num_bytes = 0;
993                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
994
995                 if (found_key.objectid > inode->i_ino ||
996                     found_key.type > BTRFS_EXTENT_DATA_KEY ||
997                     found_key.offset > end)
998                         break;
999
1000                 if (found_key.offset > cur_offset) {
1001                         extent_end = found_key.offset;
1002                         goto out_check;
1003                 }
1004
1005                 fi = btrfs_item_ptr(leaf, path->slots[0],
1006                                     struct btrfs_file_extent_item);
1007                 extent_type = btrfs_file_extent_type(leaf, fi);
1008
1009                 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1010                     extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1011                         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1012                         extent_offset = btrfs_file_extent_offset(leaf, fi);
1013                         extent_end = found_key.offset +
1014                                 btrfs_file_extent_num_bytes(leaf, fi);
1015                         if (extent_end <= start) {
1016                                 path->slots[0]++;
1017                                 goto next_slot;
1018                         }
1019                         if (disk_bytenr == 0)
1020                                 goto out_check;
1021                         if (btrfs_file_extent_compression(leaf, fi) ||
1022                             btrfs_file_extent_encryption(leaf, fi) ||
1023                             btrfs_file_extent_other_encoding(leaf, fi))
1024                                 goto out_check;
1025                         if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1026                                 goto out_check;
1027                         if (btrfs_extent_readonly(root, disk_bytenr))
1028                                 goto out_check;
1029                         if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1030                                                   found_key.offset -
1031                                                   extent_offset, disk_bytenr))
1032                                 goto out_check;
1033                         disk_bytenr += extent_offset;
1034                         disk_bytenr += cur_offset - found_key.offset;
1035                         num_bytes = min(end + 1, extent_end) - cur_offset;
1036                         /*
1037                          * force cow if csum exists in the range.
1038                          * this ensures that csums for a given extent are
1039                          * either valid or do not exist.
1040                          */
1041                         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1042                                 goto out_check;
1043                         nocow = 1;
1044                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1045                         extent_end = found_key.offset +
1046                                 btrfs_file_extent_inline_len(leaf, fi);
1047                         extent_end = ALIGN(extent_end, root->sectorsize);
1048                 } else {
1049                         BUG_ON(1);
1050                 }
1051 out_check:
1052                 if (extent_end <= start) {
1053                         path->slots[0]++;
1054                         goto next_slot;
1055                 }
1056                 if (!nocow) {
1057                         if (cow_start == (u64)-1)
1058                                 cow_start = cur_offset;
1059                         cur_offset = extent_end;
1060                         if (cur_offset > end)
1061                                 break;
1062                         path->slots[0]++;
1063                         goto next_slot;
1064                 }
1065
1066                 btrfs_release_path(root, path);
1067                 if (cow_start != (u64)-1) {
1068                         ret = cow_file_range(inode, locked_page, cow_start,
1069                                         found_key.offset - 1, page_started,
1070                                         nr_written, 1);
1071                         BUG_ON(ret);
1072                         cow_start = (u64)-1;
1073                 }
1074
1075                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1076                         struct extent_map *em;
1077                         struct extent_map_tree *em_tree;
1078                         em_tree = &BTRFS_I(inode)->extent_tree;
1079                         em = alloc_extent_map(GFP_NOFS);
1080                         em->start = cur_offset;
1081                         em->orig_start = em->start;
1082                         em->len = num_bytes;
1083                         em->block_len = num_bytes;
1084                         em->block_start = disk_bytenr;
1085                         em->bdev = root->fs_info->fs_devices->latest_bdev;
1086                         set_bit(EXTENT_FLAG_PINNED, &em->flags);
1087                         while (1) {
1088                                 write_lock(&em_tree->lock);
1089                                 ret = add_extent_mapping(em_tree, em);
1090                                 write_unlock(&em_tree->lock);
1091                                 if (ret != -EEXIST) {
1092                                         free_extent_map(em);
1093                                         break;
1094                                 }
1095                                 btrfs_drop_extent_cache(inode, em->start,
1096                                                 em->start + em->len - 1, 0);
1097                         }
1098                         type = BTRFS_ORDERED_PREALLOC;
1099                 } else {
1100                         type = BTRFS_ORDERED_NOCOW;
1101                 }
1102
1103                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1104                                                num_bytes, num_bytes, type);
1105                 BUG_ON(ret);
1106
1107                 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1108                                         cur_offset, cur_offset + num_bytes - 1,
1109                                         locked_page, 1, 1, 1, 0, 0, 0, 1);
1110                 cur_offset = extent_end;
1111                 if (cur_offset > end)
1112                         break;
1113         }
1114         btrfs_release_path(root, path);
1115
1116         if (cur_offset <= end && cow_start == (u64)-1)
1117                 cow_start = cur_offset;
1118         if (cow_start != (u64)-1) {
1119                 ret = cow_file_range(inode, locked_page, cow_start, end,
1120                                      page_started, nr_written, 1);
1121                 BUG_ON(ret);
1122         }
1123
1124         ret = btrfs_end_transaction(trans, root);
1125         BUG_ON(ret);
1126         btrfs_free_path(path);
1127         return 0;
1128 }
1129
1130 /*
1131  * extent_io.c call back to do delayed allocation processing
1132  */
1133 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1134                               u64 start, u64 end, int *page_started,
1135                               unsigned long *nr_written)
1136 {
1137         int ret;
1138         struct btrfs_root *root = BTRFS_I(inode)->root;
1139
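        /*
         * pick the delalloc strategy: nodatacow inodes always try to write
         * in place, preallocated extents may be reused without forcing it,
         * and everything else is either plain COW or, when the compress
         * mount option is set, the async compression path.
         */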
1140         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
1141                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1142                                          page_started, 1, nr_written);
1143         else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
1144                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1145                                          page_started, 0, nr_written);
1146         else if (!btrfs_test_opt(root, COMPRESS))
1147                 ret = cow_file_range(inode, locked_page, start, end,
1148                                       page_started, nr_written, 1);
1149         else
1150                 ret = cow_file_range_async(inode, locked_page, start, end,
1151                                            page_started, nr_written);
1152         return ret;
1153 }
1154
1155 /*
1156  * extent_io.c set_bit_hook, used to track delayed allocation
1157  * bytes in this file, and to maintain the list of inodes that
1158  * have pending delalloc work to be done.
1159  */
1160 static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1161                        unsigned long old, unsigned long bits)
1162 {
1163         /*
1164          * set_bit and clear bit hooks normally require _irqsave/restore
1165          * but in this case, we are only testing for the DELALLOC
1166          * bit, which is only set or cleared with irqs on
1167          */
1168         if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1169                 struct btrfs_root *root = BTRFS_I(inode)->root;
1170                 btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1171                 spin_lock(&root->fs_info->delalloc_lock);
1172                 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1173                 root->fs_info->delalloc_bytes += end - start + 1;
1174                 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1175                         list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1176                                       &root->fs_info->delalloc_inodes);
1177                 }
1178                 spin_unlock(&root->fs_info->delalloc_lock);
1179         }
1180         return 0;
1181 }
1182
1183 /*
1184  * extent_io.c clear_bit_hook, see set_bit_hook for why
1185  */
1186 static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
1187                          unsigned long old, unsigned long bits)
1188 {
1189         /*
1190          * set_bit and clear bit hooks normally require _irqsave/restore
1191          * but in this case, we are only testing for the DELALLOC
1192          * bit, which is only set or cleared with irqs on
1193          */
1194         if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1195                 struct btrfs_root *root = BTRFS_I(inode)->root;
1196
1197                 spin_lock(&root->fs_info->delalloc_lock);
1198                 if (end - start + 1 > root->fs_info->delalloc_bytes) {
1199                         printk(KERN_INFO "btrfs warning: delalloc account "
1200                                "%llu %llu\n",
1201                                (unsigned long long)end - start + 1,
1202                                (unsigned long long)
1203                                root->fs_info->delalloc_bytes);
1204                         btrfs_delalloc_free_space(root, inode, (u64)-1);
1205                         root->fs_info->delalloc_bytes = 0;
1206                         BTRFS_I(inode)->delalloc_bytes = 0;
1207                 } else {
1208                         btrfs_delalloc_free_space(root, inode,
1209                                                   end - start + 1);
1210                         root->fs_info->delalloc_bytes -= end - start + 1;
1211                         BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
1212                 }
1213                 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1214                     !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1215                         list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1216                 }
1217                 spin_unlock(&root->fs_info->delalloc_lock);
1218         }
1219         return 0;
1220 }
1221
1222 /*
1223  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1224  * we don't create bios that span stripes or chunks
1225  */
1226 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1227                          size_t size, struct bio *bio,
1228                          unsigned long bio_flags)
1229 {
1230         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1231         struct btrfs_mapping_tree *map_tree;
1232         u64 logical = (u64)bio->bi_sector << 9;
1233         u64 length = 0;
1234         u64 map_length;
1235         int ret;
1236
1237         if (bio_flags & EXTENT_BIO_COMPRESSED)
1238                 return 0;
1239
1240         length = bio->bi_size;
1241         map_tree = &root->fs_info->mapping_tree;
1242         map_length = length;
1243         ret = btrfs_map_block(map_tree, READ, logical,
1244                               &map_length, NULL, 0);
1245
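        /*
         * if the contiguous chunk mapping ends before the bio plus the new
         * page would, merging the page would make the bio span a stripe or
         * chunk boundary, so reject the merge.
         */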
1246         if (map_length < length + size)
1247                 return 1;
1248         return 0;
1249 }
1250
1251 /*
1252  * in order to insert checksums into the metadata in large chunks,
1253  * we wait until bio submission time.   All the pages in the bio are
1254  * checksummed and sums are attached onto the ordered extent record.
1255  *
1256  * At IO completion time the csums attached to the ordered extent record
1257  * are inserted into the btree
1258  */
1259 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1260                                     struct bio *bio, int mirror_num,
1261                                     unsigned long bio_flags)
1262 {
1263         struct btrfs_root *root = BTRFS_I(inode)->root;
1264         int ret = 0;
1265
1266         ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1267         BUG_ON(ret);
1268         return 0;
1269 }
1270
1271 /*
1272  * in order to insert checksums into the metadata in large chunks,
1273  * we wait until bio submission time.   All the pages in the bio are
1274  * checksummed and sums are attached onto the ordered extent record.
1275  *
1276  * At IO completion time the cums attached on the ordered extent record
1277  * are inserted into the btree
1278  */
1279 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1280                           int mirror_num, unsigned long bio_flags)
1281 {
1282         struct btrfs_root *root = BTRFS_I(inode)->root;
1283         return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1284 }
1285
1286 /*
1287  * extent_io.c submission hook. This does the right thing for csum calculation
1288  * on write, or reading the csums from the tree before a read
1289  */
1290 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1291                           int mirror_num, unsigned long bio_flags)
1292 {
1293         struct btrfs_root *root = BTRFS_I(inode)->root;
1294         int ret = 0;
1295         int skip_sum;
1296
1297         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1298
1299         ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1300         BUG_ON(ret);
1301
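        /*
         * reads: look up the csums from the tree first (compressed bios
         * take their own path), then map the bio
         */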
1302         if (!(rw & (1 << BIO_RW))) {
1303                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1304                         return btrfs_submit_compressed_read(inode, bio,
1305                                                     mirror_num, bio_flags);
1306                 } else if (!skip_sum)
1307                         btrfs_lookup_bio_sums(root, inode, bio, NULL);
1308                 goto mapit;
1309         } else if (!skip_sum) {
1310                 /* csum items have already been cloned */
1311                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1312                         goto mapit;
1313                 /* we're doing a write, do the async checksumming */
1314                 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1315                                    inode, rw, bio, mirror_num,
1316                                    bio_flags, __btrfs_submit_bio_start,
1317                                    __btrfs_submit_bio_done);
1318         }
1319
1320 mapit:
1321         return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1322 }
1323
1324 /*
1325  * given a list of ordered sums, record them in the inode.  This happens
1326  * at IO completion time based on sums calculated at bio submission time.
1327  */
1328 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1329                              struct inode *inode, u64 file_offset,
1330                              struct list_head *list)
1331 {
1332         struct btrfs_ordered_sum *sum;
1333
1334         btrfs_set_trans_block_group(trans, inode);
1335
1336         list_for_each_entry(sum, list, list) {
1337                 btrfs_csum_file_blocks(trans,
1338                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
1339         }
1340         return 0;
1341 }
1342
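/*
 * mark a range in the io_tree as delalloc.  end is the last byte of the
 * range, so a page aligned end almost certainly means the caller passed
 * a length instead of an end offset; the WARN_ON catches that
 */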
1343 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
1344 {
1345         if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1346                 WARN_ON(1);
1347         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1348                                    GFP_NOFS);
1349 }
1350
1351 /* see btrfs_writepage_start_hook for details on why this is required */
1352 struct btrfs_writepage_fixup {
1353         struct page *page;
1354         struct btrfs_work work;
1355 };
1356
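/*
 * worker queued by btrfs_writepage_start_hook: re-take the page lock,
 * wait for any ordered extent covering the page to finish, then mark
 * the range delalloc again so a later writepage does full IO setup
 */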
1357 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1358 {
1359         struct btrfs_writepage_fixup *fixup;
1360         struct btrfs_ordered_extent *ordered;
1361         struct page *page;
1362         struct inode *inode;
1363         u64 page_start;
1364         u64 page_end;
1365
1366         fixup = container_of(work, struct btrfs_writepage_fixup, work);
1367         page = fixup->page;
1368 again:
1369         lock_page(page);
1370         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1371                 ClearPageChecked(page);
1372                 goto out_page;
1373         }
1374
1375         inode = page->mapping->host;
1376         page_start = page_offset(page);
1377         page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1378
1379         lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1380
1381         /* already ordered? We're done */
1382         if (PagePrivate2(page))
1383                 goto out;
1384
1385         ordered = btrfs_lookup_ordered_extent(inode, page_start);
1386         if (ordered) {
1387                 unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
1388                               page_end, GFP_NOFS);
1389                 unlock_page(page);
1390                 btrfs_start_ordered_extent(inode, ordered, 1);
1391                 goto again;
1392         }
1393
1394         btrfs_set_extent_delalloc(inode, page_start, page_end);
1395         ClearPageChecked(page);
1396 out:
1397         unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1398 out_page:
1399         unlock_page(page);
1400         page_cache_release(page);
1401 }
1402
1403 /*
1404  * There are a few paths in the higher layers of the kernel that directly
1405  * set the page dirty bit without asking the filesystem if it is a
1406  * good idea.  This causes problems because we want to make sure COW
1407  * properly happens and the data=ordered rules are followed.
1408  *
1409  * In our case any range that doesn't have the ORDERED bit set
1410  * hasn't been properly set up for IO.  We kick off an async process
1411  * to fix it up.  The async helper will wait for ordered extents, set
1412  * the delalloc bit and make it safe to write the page.
1413  */
1414 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1415 {
1416         struct inode *inode = page->mapping->host;
1417         struct btrfs_writepage_fixup *fixup;
1418         struct btrfs_root *root = BTRFS_I(inode)->root;
1419
1420         /* this page is properly in the ordered list */
1421         if (TestClearPagePrivate2(page))
1422                 return 0;
1423
1424         if (PageChecked(page))
1425                 return -EAGAIN;
1426
1427         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1428         if (!fixup)
1429                 return -EAGAIN;
1430
1431         SetPageChecked(page);
1432         page_cache_get(page);
1433         fixup->work.func = btrfs_writepage_fixup_worker;
1434         fixup->page = page;
1435         btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1436         return -EAGAIN;
1437 }
1438
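/*
 * insert the file extent item for an extent that was reserved earlier
 * and has now been written.  Existing extents in the range are dropped
 * (but left pinned in the extent map cache, see below) and a reference
 * is taken on the new data extent.
 */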
1439 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1440                                        struct inode *inode, u64 file_pos,
1441                                        u64 disk_bytenr, u64 disk_num_bytes,
1442                                        u64 num_bytes, u64 ram_bytes,
1443                                        u64 locked_end,
1444                                        u8 compression, u8 encryption,
1445                                        u16 other_encoding, int extent_type)
1446 {
1447         struct btrfs_root *root = BTRFS_I(inode)->root;
1448         struct btrfs_file_extent_item *fi;
1449         struct btrfs_path *path;
1450         struct extent_buffer *leaf;
1451         struct btrfs_key ins;
1452         u64 hint;
1453         int ret;
1454
1455         path = btrfs_alloc_path();
1456         BUG_ON(!path);
1457
1458         path->leave_spinning = 1;
1459
1460         /*
1461          * we may be replacing one extent in the tree with another.
1462          * The new extent is pinned in the extent map, and we don't want
1463          * to drop it from the cache until it is completely in the btree.
1464          *
1465          * So, tell btrfs_drop_extents to leave this extent in the cache.
1466          * the caller is expected to unpin it and allow it to be merged
1467          * with the others.
1468          */
1469         ret = btrfs_drop_extents(trans, root, inode, file_pos,
1470                                  file_pos + num_bytes, locked_end,
1471                                  file_pos, &hint, 0);
1472         BUG_ON(ret);
1473
1474         ins.objectid = inode->i_ino;
1475         ins.offset = file_pos;
1476         ins.type = BTRFS_EXTENT_DATA_KEY;
1477         ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1478         BUG_ON(ret);
1479         leaf = path->nodes[0];
1480         fi = btrfs_item_ptr(leaf, path->slots[0],
1481                             struct btrfs_file_extent_item);
1482         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1483         btrfs_set_file_extent_type(leaf, fi, extent_type);
1484         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1485         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1486         btrfs_set_file_extent_offset(leaf, fi, 0);
1487         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1488         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1489         btrfs_set_file_extent_compression(leaf, fi, compression);
1490         btrfs_set_file_extent_encryption(leaf, fi, encryption);
1491         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1492
1493         btrfs_unlock_up_safe(path, 1);
1494         btrfs_set_lock_blocking(leaf);
1495
1496         btrfs_mark_buffer_dirty(leaf);
1497
1498         inode_add_bytes(inode, num_bytes);
1499
1500         ins.objectid = disk_bytenr;
1501         ins.offset = disk_num_bytes;
1502         ins.type = BTRFS_EXTENT_ITEM_KEY;
1503         ret = btrfs_alloc_reserved_file_extent(trans, root,
1504                                         root->root_key.objectid,
1505                                         inode->i_ino, file_pos, &ins);
1506         BUG_ON(ret);
1507         btrfs_free_path(path);
1508
1509         return 0;
1510 }
1511
1512 /*
1513  * helper function for btrfs_finish_ordered_io.  This
1514  * just reads in some of the csum leaves to prime them into ram
1515  * before we start the transaction.  It limits the amount of btree
1516  * reads required while inside the transaction.
1517  */
1518 static noinline void reada_csum(struct btrfs_root *root,
1519                                 struct btrfs_path *path,
1520                                 struct btrfs_ordered_extent *ordered_extent)
1521 {
1522         struct btrfs_ordered_sum *sum;
1523         u64 bytenr;
1524
1525         sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
1526                          list);
1527         bytenr = sum->sums[0].bytenr;
1528
1529         /*
1530          * we don't care about the results, the point of this search is
1531          * just to get the btree leaves into ram
1532          */
1533         btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
1534 }
1535
1536 /* as ordered data IO finishes, this gets called so we can finish
1537  * an ordered extent if the range of bytes in the file it covers are
1538  * fully written.
1539  */
1540 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1541 {
1542         struct btrfs_root *root = BTRFS_I(inode)->root;
1543         struct btrfs_trans_handle *trans;
1544         struct btrfs_ordered_extent *ordered_extent = NULL;
1545         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1546         struct btrfs_path *path;
1547         int compressed = 0;
1548         int ret;
1549
1550         ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1551         if (!ret)
1552                 return 0;
1553
1554         /*
1555          * before we join the transaction, try to do some of our IO.
1556          * This will limit the amount of IO that we have to do with
1557          * the transaction running.  We're unlikely to need to do any
1558          * IO if the file extents are new, the disk_i_size checks
1559          * covers the most common case.
1560          */
1561         if (start < BTRFS_I(inode)->disk_i_size) {
1562                 path = btrfs_alloc_path();
1563                 if (path) {
1564                         ret = btrfs_lookup_file_extent(NULL, root, path,
1565                                                        inode->i_ino,
1566                                                        start, 0);
1567                         ordered_extent = btrfs_lookup_ordered_extent(inode,
1568                                                                      start);
1569                         if (!list_empty(&ordered_extent->list)) {
1570                                 btrfs_release_path(root, path);
1571                                 reada_csum(root, path, ordered_extent);
1572                         }
1573                         btrfs_free_path(path);
1574                 }
1575         }
1576
1577         trans = btrfs_join_transaction(root, 1);
1578
1579         if (!ordered_extent)
1580                 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1581         BUG_ON(!ordered_extent);
1582         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
1583                 goto nocow;
1584
1585         lock_extent(io_tree, ordered_extent->file_offset,
1586                     ordered_extent->file_offset + ordered_extent->len - 1,
1587                     GFP_NOFS);
1588
1589         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1590                 compressed = 1;
1591         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1592                 BUG_ON(compressed);
1593                 ret = btrfs_mark_extent_written(trans, root, inode,
1594                                                 ordered_extent->file_offset,
1595                                                 ordered_extent->file_offset +
1596                                                 ordered_extent->len);
1597                 BUG_ON(ret);
1598         } else {
1599                 ret = insert_reserved_file_extent(trans, inode,
1600                                                 ordered_extent->file_offset,
1601                                                 ordered_extent->start,
1602                                                 ordered_extent->disk_len,
1603                                                 ordered_extent->len,
1604                                                 ordered_extent->len,
1605                                                 ordered_extent->file_offset +
1606                                                 ordered_extent->len,
1607                                                 compressed, 0, 0,
1608                                                 BTRFS_FILE_EXTENT_REG);
1609                 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1610                                    ordered_extent->file_offset,
1611                                    ordered_extent->len);
1612                 BUG_ON(ret);
1613         }
1614         unlock_extent(io_tree, ordered_extent->file_offset,
1615                     ordered_extent->file_offset + ordered_extent->len - 1,
1616                     GFP_NOFS);
1617 nocow:
1618         add_pending_csums(trans, inode, ordered_extent->file_offset,
1619                           &ordered_extent->list);
1620
1621         mutex_lock(&BTRFS_I(inode)->extent_mutex);
1622         btrfs_ordered_update_i_size(inode, ordered_extent);
1623         btrfs_update_inode(trans, root, inode);
1624         btrfs_remove_ordered_extent(inode, ordered_extent);
1625         mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1626
1627         /* once for us */
1628         btrfs_put_ordered_extent(ordered_extent);
1629         /* once for the tree */
1630         btrfs_put_ordered_extent(ordered_extent);
1631
1632         btrfs_end_transaction(trans, root);
1633         return 0;
1634 }
1635
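/*
 * extent_io.c write end_io hook: clear the ordered bit on the page and
 * hand the finished range to btrfs_finish_ordered_io
 */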
1636 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1637                                 struct extent_state *state, int uptodate)
1638 {
1639         ClearPagePrivate2(page);
1640         return btrfs_finish_ordered_io(page->mapping->host, start, end);
1641 }
1642
1643 /*
1644  * When IO fails, either with EIO or csum verification fails, we
1645  * try other mirrors that might have a good copy of the data.  This
1646  * io_failure_record is used to record state as we go through all the
1647  * mirrors.  If another mirror has good data, the page is set up to date
1648  * and things continue.  If a good mirror can't be found, the original
1649  * bio end_io callback is called to indicate things have failed.
1650  */
1651 struct io_failure_record {
1652         struct page *page;
1653         u64 start;
1654         u64 len;
1655         u64 logical;
1656         unsigned long bio_flags;
1657         int last_mirror;
1658 };
1659
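/*
 * IO failure hook: record the failed range and, while untried mirrors
 * remain, resubmit the page to the next mirror.  Returns -EIO once no
 * good copy can be found.
 */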
1660 static int btrfs_io_failed_hook(struct bio *failed_bio,
1661                          struct page *page, u64 start, u64 end,
1662                          struct extent_state *state)
1663 {
1664         struct io_failure_record *failrec = NULL;
1665         u64 private;
1666         struct extent_map *em;
1667         struct inode *inode = page->mapping->host;
1668         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1669         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1670         struct bio *bio;
1671         int num_copies;
1672         int ret;
1673         int rw;
1674         u64 logical;
1675
1676         ret = get_state_private(failure_tree, start, &private);
1677         if (ret) {
1678                 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1679                 if (!failrec)
1680                         return -ENOMEM;
1681                 failrec->start = start;
1682                 failrec->len = end - start + 1;
1683                 failrec->last_mirror = 0;
1684                 failrec->bio_flags = 0;
1685
1686                 read_lock(&em_tree->lock);
1687                 em = lookup_extent_mapping(em_tree, start, failrec->len);
1688                 if (em && (em->start > start || em->start + em->len < start)) {
1689                         free_extent_map(em);
1690                         em = NULL;
1691                 }
1692                 read_unlock(&em_tree->lock);
1693
1694                 if (!em || IS_ERR(em)) {
1695                         kfree(failrec);
1696                         return -EIO;
1697                 }
1698                 logical = start - em->start;
1699                 logical = em->block_start + logical;
1700                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1701                         logical = em->block_start;
1702                         failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1703                 }
1704                 failrec->logical = logical;
1705                 free_extent_map(em);
1706                 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1707                                 EXTENT_DIRTY, GFP_NOFS);
1708                 set_state_private(failure_tree, start,
1709                                  (u64)(unsigned long)failrec);
1710         } else {
1711                 failrec = (struct io_failure_record *)(unsigned long)private;
1712         }
1713         num_copies = btrfs_num_copies(
1714                               &BTRFS_I(inode)->root->fs_info->mapping_tree,
1715                               failrec->logical, failrec->len);
1716         failrec->last_mirror++;
1717         if (!state) {
1718                 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1719                 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1720                                                     failrec->start,
1721                                                     EXTENT_LOCKED);
1722                 if (state && state->start != failrec->start)
1723                         state = NULL;
1724                 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1725         }
1726         if (!state || failrec->last_mirror > num_copies) {
1727                 set_state_private(failure_tree, failrec->start, 0);
1728                 clear_extent_bits(failure_tree, failrec->start,
1729                                   failrec->start + failrec->len - 1,
1730                                   EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1731                 kfree(failrec);
1732                 return -EIO;
1733         }
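        /* build a fresh single page bio and aim it at the next mirror */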
1734         bio = bio_alloc(GFP_NOFS, 1);
1735         bio->bi_private = state;
1736         bio->bi_end_io = failed_bio->bi_end_io;
1737         bio->bi_sector = failrec->logical >> 9;
1738         bio->bi_bdev = failed_bio->bi_bdev;
1739         bio->bi_size = 0;
1740
1741         bio_add_page(bio, page, failrec->len, start - page_offset(page));
1742         if (failed_bio->bi_rw & (1 << BIO_RW))
1743                 rw = WRITE;
1744         else
1745                 rw = READ;
1746
1747         BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1748                                                       failrec->last_mirror,
1749                                                       failrec->bio_flags);
1750         return 0;
1751 }
1752
1753 /*
1754  * each time an IO finishes, we do a fast check in the IO failure tree
1755  * to see if we need to process or clean up an io_failure_record
1756  */
1757 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1758 {
1759         u64 private;
1760         u64 private_failure;
1761         struct io_failure_record *failure;
1762         int ret;
1763
1764         private = 0;
1765         if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1766                              (u64)-1, 1, EXTENT_DIRTY)) {
1767                 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1768                                         start, &private_failure);
1769                 if (ret == 0) {
1770                         failure = (struct io_failure_record *)(unsigned long)
1771                                    private_failure;
1772                         set_state_private(&BTRFS_I(inode)->io_failure_tree,
1773                                           failure->start, 0);
1774                         clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1775                                           failure->start,
1776                                           failure->start + failure->len - 1,
1777                                           EXTENT_DIRTY | EXTENT_LOCKED,
1778                                           GFP_NOFS);
1779                         kfree(failure);
1780                 }
1781         }
1782         return 0;
1783 }
1784
1785 /*
1786  * when reads are done, we need to check csums to verify the data is correct.
1787  * If there's a match, we allow the bio to finish.  If not, we go through
1788  * the io_failure_record routines to find good copies
1789  */
1790 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1791                                struct extent_state *state)
1792 {
1793         size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1794         struct inode *inode = page->mapping->host;
1795         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1796         char *kaddr;
1797         u64 private = ~(u32)0;
1798         int ret;
1799         struct btrfs_root *root = BTRFS_I(inode)->root;
1800         u32 csum = ~(u32)0;
1801
1802         if (PageChecked(page)) {
1803                 ClearPageChecked(page);
1804                 goto good;
1805         }
1806
1807         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1808                 return 0;
1809
1810         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1811             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1812                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1813                                   GFP_NOFS);
1814                 return 0;
1815         }
1816
1817         if (state && state->start == start) {
1818                 private = state->private;
1819                 ret = 0;
1820         } else {
1821                 ret = get_state_private(io_tree, start, &private);
1822         }
1823         kaddr = kmap_atomic(page, KM_USER0);
1824         if (ret)
1825                 goto zeroit;
1826
1827         csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
1828         btrfs_csum_final(csum, (char *)&csum);
1829         if (csum != private)
1830                 goto zeroit;
1831
1832         kunmap_atomic(kaddr, KM_USER0);
1833 good:
1834         /* if the io failure tree for this inode is non-empty,
1835          * check to see if we've recovered from a failed IO
1836          */
1837         btrfs_clean_io_failures(inode, start);
1838         return 0;
1839
1840 zeroit:
1841         if (printk_ratelimit()) {
1842                 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1843                        "private %llu\n", page->mapping->host->i_ino,
1844                        (unsigned long long)start, csum,
1845                        (unsigned long long)private);
1846         }
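        /* poison the range so data that failed the csum check can't be used */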
1847         memset(kaddr + offset, 1, end - start + 1);
1848         flush_dcache_page(page);
1849         kunmap_atomic(kaddr, KM_USER0);
1850         if (private == 0)
1851                 return 0;
1852         return -EIO;
1853 }
1854
1855 /*
1856  * This creates an orphan entry for the given inode in case something goes
1857  * wrong in the middle of an unlink/truncate.
1858  */
1859 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
1860 {
1861         struct btrfs_root *root = BTRFS_I(inode)->root;
1862         int ret = 0;
1863
1864         spin_lock(&root->list_lock);
1865
1866         /* already on the orphan list, we're good */
1867         if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
1868                 spin_unlock(&root->list_lock);
1869                 return 0;
1870         }
1871
1872         list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1873
1874         spin_unlock(&root->list_lock);
1875
1876         /*
1877          * insert an orphan item to track this unlinked/truncated file
1878          */
1879         ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
1880
1881         return ret;
1882 }
1883
1884 /*
1885  * We have done the truncate/delete so we can go ahead and remove the orphan
1886  * item for this particular inode.
1887  */
1888 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
1889 {
1890         struct btrfs_root *root = BTRFS_I(inode)->root;
1891         int ret = 0;
1892
1893         spin_lock(&root->list_lock);
1894
1895         if (list_empty(&BTRFS_I(inode)->i_orphan)) {
1896                 spin_unlock(&root->list_lock);
1897                 return 0;
1898         }
1899
1900         list_del_init(&BTRFS_I(inode)->i_orphan);
1901         if (!trans) {
1902                 spin_unlock(&root->list_lock);
1903                 return 0;
1904         }
1905
1906         spin_unlock(&root->list_lock);
1907
1908         ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
1909
1910         return ret;
1911 }
1912
1913 /*
1914  * this cleans up any orphans that may be left on the list from the last use
1915  * of this root.
1916  */
1917 void btrfs_orphan_cleanup(struct btrfs_root *root)
1918 {
1919         struct btrfs_path *path;
1920         struct extent_buffer *leaf;
1921         struct btrfs_item *item;
1922         struct btrfs_key key, found_key;
1923         struct btrfs_trans_handle *trans;
1924         struct inode *inode;
1925         int ret = 0, nr_unlink = 0, nr_truncate = 0;
1926
1927         path = btrfs_alloc_path();
1928         if (!path)
1929                 return;
1930         path->reada = -1;
1931
1932         key.objectid = BTRFS_ORPHAN_OBJECTID;
1933         btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1934         key.offset = (u64)-1;
1935
1936
1937         while (1) {
1938                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1939                 if (ret < 0) {
1940                         printk(KERN_ERR "Error searching slot for orphan: %d"
1941                                "\n", ret);
1942                         break;
1943                 }
1944
1945                 /*
1946                  * ret == 0 means we found exactly what we were searching
1947                  * for, which is weird but possible.  Only step the path back
1948                  * if we didn't find the key, then check whether what's there matches
1949                  */
1950                 if (ret > 0) {
1951                         if (path->slots[0] == 0)
1952                                 break;
1953                         path->slots[0]--;
1954                 }
1955
1956                 /* pull out the item */
1957                 leaf = path->nodes[0];
1958                 item = btrfs_item_nr(leaf, path->slots[0]);
1959                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1960
1961                 /* make sure the item matches what we want */
1962                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
1963                         break;
1964                 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
1965                         break;
1966
1967                 /* release the path since we're done with it */
1968                 btrfs_release_path(root, path);
1969
1970                 /*
1971                  * this is basically btrfs_lookup, minus the cross-root
1972                  * handling.  We store the inode number in the
1973                  * offset of the orphan item.
1974                  */
1975                 found_key.objectid = found_key.offset;
1976                 found_key.type = BTRFS_INODE_ITEM_KEY;
1977                 found_key.offset = 0;
1978                 inode = btrfs_iget(root->fs_info->sb, &found_key, root);
1979                 if (IS_ERR(inode))
1980                         break;
1981
1982                 /*
1983                  * add this inode to the orphan list so btrfs_orphan_del does
1984                  * the proper thing when we hit it
1985                  */
1986                 spin_lock(&root->list_lock);
1987                 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1988                 spin_unlock(&root->list_lock);
1989
1990                 /*
1991                  * if this is a bad inode, means we actually succeeded in
1992                  * removing the inode, but not the orphan record, which means
1993                  * we need to manually delete the orphan since iput will just
1994                  * do a destroy_inode
1995                  */
1996                 if (is_bad_inode(inode)) {
1997                         trans = btrfs_start_transaction(root, 1);
1998                         btrfs_orphan_del(trans, inode);
1999                         btrfs_end_transaction(trans, root);
2000                         iput(inode);
2001                         continue;
2002                 }
2003
2004                 /* if we have links, this was a truncate, let's do that */
2005                 if (inode->i_nlink) {
2006                         nr_truncate++;
2007                         btrfs_truncate(inode);
2008                 } else {
2009                         nr_unlink++;
2010                 }
2011
2012                 /* this will do delete_inode and everything for us */
2013                 iput(inode);
2014         }
2015
2016         if (nr_unlink)
2017                 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2018         if (nr_truncate)
2019                 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2020
2021         btrfs_free_path(path);
2022 }
2023
2024 /*
2025  * very simple check to peek ahead in the leaf looking for xattrs.  If we
2026  * don't find any xattrs, we know there can't be any acls.
2027  *
2028  * slot is the slot the inode is in, objectid is the objectid of the inode
2029  */
2030 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2031                                           int slot, u64 objectid)
2032 {
2033         u32 nritems = btrfs_header_nritems(leaf);
2034         struct btrfs_key found_key;
2035         int scanned = 0;
2036
2037         slot++;
2038         while (slot < nritems) {
2039                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2040
2041                 /* we found a different objectid, there must not be acls */
2042                 if (found_key.objectid != objectid)
2043                         return 0;
2044
2045                 /* we found an xattr, assume we've got an acl */
2046                 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2047                         return 1;
2048
2049                 /*
2050                  * we found a key greater than an xattr key, there can't
2051                  * be any acls later on
2052                  */
2053                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2054                         return 0;
2055
2056                 slot++;
2057                 scanned++;
2058
2059                 /*
2060                  * it goes inode, inode backrefs, xattrs, extents,
2061                  * so if there are a ton of hard links to an inode there can
2062                  * be a lot of backrefs.  Don't waste time searching too hard,
2063                  * this is just an optimization
2064                  */
2065                 if (scanned >= 8)
2066                         break;
2067         }
2068         /* we hit the end of the leaf before we found an xattr or
2069          * something larger than an xattr.  We have to assume the inode
2070          * has acls
2071          */
2072         return 1;
2073 }
2074
2075 /*
2076  * read an inode from the btree into the in-memory inode
2077  */
2078 static void btrfs_read_locked_inode(struct inode *inode)
2079 {
2080         struct btrfs_path *path;
2081         struct extent_buffer *leaf;
2082         struct btrfs_inode_item *inode_item;
2083         struct btrfs_timespec *tspec;
2084         struct btrfs_root *root = BTRFS_I(inode)->root;
2085         struct btrfs_key location;
2086         int maybe_acls;
2087         u64 alloc_group_block;
2088         u32 rdev;
2089         int ret;
2090
2091         path = btrfs_alloc_path();
2092         BUG_ON(!path);
2093         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2094
2095         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2096         if (ret)
2097                 goto make_bad;
2098
2099         leaf = path->nodes[0];
2100         inode_item = btrfs_item_ptr(leaf, path->slots[0],
2101                                     struct btrfs_inode_item);
2102
2103         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2104         inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2105         inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2106         inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2107         btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2108
2109         tspec = btrfs_inode_atime(inode_item);
2110         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2111         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2112
2113         tspec = btrfs_inode_mtime(inode_item);
2114         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2115         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2116
2117         tspec = btrfs_inode_ctime(inode_item);
2118         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2119         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2120
2121         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2122         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2123         BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2124         inode->i_generation = BTRFS_I(inode)->generation;
2125         inode->i_rdev = 0;
2126         rdev = btrfs_inode_rdev(leaf, inode_item);
2127
2128         BTRFS_I(inode)->index_cnt = (u64)-1;
2129         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2130
2131         alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2132
2133         /*
2134          * try to precache a NULL acl entry for files that don't have
2135          * any xattrs or acls
2136          */
2137         maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2138         if (!maybe_acls) {
2139                 BTRFS_I(inode)->i_acl = NULL;
2140                 BTRFS_I(inode)->i_default_acl = NULL;
2141         }
2142
2143         BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2144                                                 alloc_group_block, 0);
2145         btrfs_free_path(path);
2146         inode_item = NULL;
2147
2148         switch (inode->i_mode & S_IFMT) {
2149         case S_IFREG:
2150                 inode->i_mapping->a_ops = &btrfs_aops;
2151                 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2152                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2153                 inode->i_fop = &btrfs_file_operations;
2154                 inode->i_op = &btrfs_file_inode_operations;
2155                 break;
2156         case S_IFDIR:
2157                 inode->i_fop = &btrfs_dir_file_operations;
2158                 if (root == root->fs_info->tree_root)
2159                         inode->i_op = &btrfs_dir_ro_inode_operations;
2160                 else
2161                         inode->i_op = &btrfs_dir_inode_operations;
2162                 break;
2163         case S_IFLNK:
2164                 inode->i_op = &btrfs_symlink_inode_operations;
2165                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2166                 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2167                 break;
2168         default:
2169                 inode->i_op = &btrfs_special_inode_operations;
2170                 init_special_inode(inode, inode->i_mode, rdev);
2171                 break;
2172         }
2173
2174         btrfs_update_iflags(inode);
2175         return;
2176
2177 make_bad:
2178         btrfs_free_path(path);
2179         make_bad_inode(inode);
2180 }
2181
2182 /*
2183  * given a leaf and an inode, copy the inode fields into the leaf
2184  */
2185 static void fill_inode_item(struct btrfs_trans_handle *trans,
2186                             struct extent_buffer *leaf,
2187                             struct btrfs_inode_item *item,
2188                             struct inode *inode)
2189 {
2190         btrfs_set_inode_uid(leaf, item, inode->i_uid);
2191         btrfs_set_inode_gid(leaf, item, inode->i_gid);
2192         btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2193         btrfs_set_inode_mode(leaf, item, inode->i_mode);
2194         btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2195
2196         btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2197                                inode->i_atime.tv_sec);
2198         btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2199                                 inode->i_atime.tv_nsec);
2200
2201         btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2202                                inode->i_mtime.tv_sec);
2203         btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2204                                 inode->i_mtime.tv_nsec);
2205
2206         btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2207                                inode->i_ctime.tv_sec);
2208         btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2209                                 inode->i_ctime.tv_nsec);
2210
2211         btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2212         btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2213         btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2214         btrfs_set_inode_transid(leaf, item, trans->transid);
2215         btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2216         btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2217         btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2218 }
2219
2220 /*
2221  * copy everything in the in-memory inode into the btree.
2222  */
2223 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2224                                 struct btrfs_root *root, struct inode *inode)
2225 {
2226         struct btrfs_inode_item *inode_item;
2227         struct btrfs_path *path;
2228         struct extent_buffer *leaf;
2229         int ret;
2230
2231         path = btrfs_alloc_path();
2232         BUG_ON(!path);
2233         path->leave_spinning = 1;
2234         ret = btrfs_lookup_inode(trans, root, path,
2235                                  &BTRFS_I(inode)->location, 1);
2236         if (ret) {
2237                 if (ret > 0)
2238                         ret = -ENOENT;
2239                 goto failed;
2240         }
2241
2242         btrfs_unlock_up_safe(path, 1);
2243         leaf = path->nodes[0];
2244         inode_item = btrfs_item_ptr(leaf, path->slots[0],
2245                                   struct btrfs_inode_item);
2246
2247         fill_inode_item(trans, leaf, inode_item, inode);
2248         btrfs_mark_buffer_dirty(leaf);
2249         btrfs_set_inode_last_trans(trans, inode);
2250         ret = 0;
2251 failed:
2252         btrfs_free_path(path);
2253         return ret;
2254 }
2255
2256
2257 /*
2258  * unlink helper that gets used here in inode.c and in the tree logging
2259  * recovery code.  It removes a link in a directory with a given name, and
2260  * also drops the back refs in the inode to the directory
2261  */
2262 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2263                        struct btrfs_root *root,
2264                        struct inode *dir, struct inode *inode,
2265                        const char *name, int name_len)
2266 {
2267         struct btrfs_path *path;
2268         int ret = 0;
2269         struct extent_buffer *leaf;
2270         struct btrfs_dir_item *di;
2271         struct btrfs_key key;
2272         u64 index;
2273
2274         path = btrfs_alloc_path();
2275         if (!path) {
2276                 ret = -ENOMEM;
2277                 goto err;
2278         }
2279
2280         path->leave_spinning = 1;
2281         di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2282                                     name, name_len, -1);
2283         if (IS_ERR(di)) {
2284                 ret = PTR_ERR(di);
2285                 goto err;
2286         }
2287         if (!di) {
2288                 ret = -ENOENT;
2289                 goto err;
2290         }
2291         leaf = path->nodes[0];
2292         btrfs_dir_item_key_to_cpu(leaf, di, &key);
2293         ret = btrfs_delete_one_dir_name(trans, root, path, di);
2294         if (ret)
2295                 goto err;
2296         btrfs_release_path(root, path);
2297
2298         ret = btrfs_del_inode_ref(trans, root, name, name_len,
2299                                   inode->i_ino,
2300                                   dir->i_ino, &index);
2301         if (ret) {
2302                 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2303                        "inode %lu parent %lu\n", name_len, name,
2304                        inode->i_ino, dir->i_ino);
2305                 goto err;
2306         }
2307
2308         di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2309                                          index, name, name_len, -1);
2310         if (IS_ERR(di)) {
2311                 ret = PTR_ERR(di);
2312                 goto err;
2313         }
2314         if (!di) {
2315                 ret = -ENOENT;
2316                 goto err;
2317         }
2318         ret = btrfs_delete_one_dir_name(trans, root, path, di);
2319         btrfs_release_path(root, path);
2320
2321         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2322                                          inode, dir->i_ino);
2323         BUG_ON(ret != 0 && ret != -ENOENT);
2324
2325         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2326                                            dir, index);
2327         BUG_ON(ret);
2328 err:
2329         btrfs_free_path(path);
2330         if (ret)
2331                 goto out;
2332
2333         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2334         inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2335         btrfs_update_inode(trans, root, dir);
2336         btrfs_drop_nlink(inode);
2337         ret = btrfs_update_inode(trans, root, inode);
2338         dir->i_sb->s_dirt = 1;
2339 out:
2340         return ret;
2341 }
2342
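/*
 * unlink a name from a directory.  The dir entry and inode back refs go
 * through btrfs_unlink_inode; if that drops the last link, the inode is
 * put on the orphan list so it is reclaimed even after a crash
 */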
2343 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2344 {
2345         struct btrfs_root *root;
2346         struct btrfs_trans_handle *trans;
2347         struct inode *inode = dentry->d_inode;
2348         int ret;
2349         unsigned long nr = 0;
2350
2351         root = BTRFS_I(dir)->root;
2352
2353         trans = btrfs_start_transaction(root, 1);
2354
2355         btrfs_set_trans_block_group(trans, dir);
2356
2357         btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2358
2359         ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2360                                  dentry->d_name.name, dentry->d_name.len);
2361
2362         if (inode->i_nlink == 0)
2363                 ret = btrfs_orphan_add(trans, inode);
2364
2365         nr = trans->blocks_used;
2366
2367         btrfs_end_transaction_throttle(trans, root);
2368         btrfs_btree_balance_dirty(root, nr);
2369         return ret;
2370 }
2371
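/*
 * rmdir an empty directory.  The inode goes on the orphan list before
 * the entry is removed, so a crash part way through still gets cleaned
 * up by orphan processing on the next mount
 */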
2372 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2373 {
2374         struct inode *inode = dentry->d_inode;
2375         int err = 0;
2376         int ret;
2377         struct btrfs_root *root = BTRFS_I(dir)->root;
2378         struct btrfs_trans_handle *trans;
2379         unsigned long nr = 0;
2380
2381         /*
2382          * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
2383          * the root of a subvolume or snapshot
2384          */
2385         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2386             inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
2387                 return -ENOTEMPTY;
2388         }
2389
2390         trans = btrfs_start_transaction(root, 1);
2391         btrfs_set_trans_block_group(trans, dir);
2392
2393         err = btrfs_orphan_add(trans, inode);
2394         if (err)
2395                 goto fail_trans;
2396
2397         /* now the directory is empty */
2398         err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2399                                  dentry->d_name.name, dentry->d_name.len);
2400         if (!err)
2401                 btrfs_i_size_write(inode, 0);
2402
2403 fail_trans:
2404         nr = trans->blocks_used;
2405         ret = btrfs_end_transaction_throttle(trans, root);
2406         btrfs_btree_balance_dirty(root, nr);
2407
2408         if (ret && !err)
2409                 err = ret;
2410         return err;
2411 }
2412
2413 #if 0
2414 /*
2415  * when truncating bytes in a file, it is possible to avoid reading
2416  * the leaves that contain only checksum items.  This can be the
2417  * majority of the IO required to delete a large file, but it must
2418  * be done carefully.
2419  *
2420  * The keys in the level just above the leaves are checked to make sure
2421  * the lowest key in a given leaf is a csum key, and starts at an offset
2422  * after the new size.
2423  *
2424  * Then the key for the next leaf is checked to make sure it also has
2425  * a checksum item for the same file.  If it does, we know our target leaf
2426  * contains only checksum items, and it can be safely freed without reading
2427  * it.
2428  *
2429  * This is just an optimization targeted at large files.  It may do
2430  * nothing.  It will return 0 unless things went badly.
2431  */
2432 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2433                                      struct btrfs_root *root,
2434                                      struct btrfs_path *path,
2435                                      struct inode *inode, u64 new_size)
2436 {
2437         struct btrfs_key key;
2438         int ret;
2439         int nritems;
2440         struct btrfs_key found_key;
2441         struct btrfs_key other_key;
2442         struct btrfs_leaf_ref *ref;
2443         u64 leaf_gen;
2444         u64 leaf_start;
2445
2446         path->lowest_level = 1;
2447         key.objectid = inode->i_ino;
2448         key.type = BTRFS_CSUM_ITEM_KEY;
2449         key.offset = new_size;
2450 again:
2451         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2452         if (ret < 0)
2453                 goto out;
2454
2455         if (path->nodes[1] == NULL) {
2456                 ret = 0;
2457                 goto out;
2458         }
2459         ret = 0;
2460         btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2461         nritems = btrfs_header_nritems(path->nodes[1]);
2462
2463         if (!nritems)
2464                 goto out;
2465
2466         if (path->slots[1] >= nritems)
2467                 goto next_node;
2468
2469         /* did we find a key greater than anything we want to delete? */
2470         if (found_key.objectid > inode->i_ino ||
2471            (found_key.objectid == inode->i_ino && found_key.type > key.type))
2472                 goto out;
2473
2474         /* we check the next key in the node to make sure the leaf contains
2475          * only checksum items.  This comparison doesn't work if our
2476          * leaf is the last one in the node
2477          */
2478         if (path->slots[1] + 1 >= nritems) {
2479 next_node:
2480                 /* search forward from the last key in the node, this
2481                  * will bring us into the next node in the tree
2482                  */
2483                 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2484
2485                 /* unlikely, but we inc below, so check to be safe */
2486                 if (found_key.offset == (u64)-1)
2487                         goto out;
2488
2489                 /* search_forward needs a path with locks held, do the
2490                  * search again for the original key.  It is possible
2491                  * this will race with a balance and return a path that
2492                  * we could modify, but this drop is just an optimization
2493                  * and is allowed to miss some leaves.
2494                  */
2495                 btrfs_release_path(root, path);
2496                 found_key.offset++;
2497
2498                 /* setup a max key for search_forward */
2499                 other_key.offset = (u64)-1;
2500                 other_key.type = key.type;
2501                 other_key.objectid = key.objectid;
2502
2503                 path->keep_locks = 1;
2504                 ret = btrfs_search_forward(root, &found_key, &other_key,
2505                                            path, 0, 0);
2506                 path->keep_locks = 0;
2507                 if (ret || found_key.objectid != key.objectid ||
2508                     found_key.type != key.type) {
2509                         ret = 0;
2510                         goto out;
2511                 }
2512
2513                 key.offset = found_key.offset;
2514                 btrfs_release_path(root, path);
2515                 cond_resched();
2516                 goto again;
2517         }
2518
2519         /* we know there's one more slot after us in the tree,
2520          * read that key so we can verify it is also a checksum item
2521          */
2522         btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2523
2524         if (found_key.objectid < inode->i_ino)
2525                 goto next_key;
2526
2527         if (found_key.type != key.type || found_key.offset < new_size)
2528                 goto next_key;
2529
2530         /*
2531          * if the key for the next leaf isn't a csum key from this objectid,
2532          * we can't be sure there aren't good items inside this leaf.
2533          * Bail out
2534          */
2535         if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2536                 goto out;
2537
2538         leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2539         leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2540         /*
2541          * it is safe to delete this leaf, it contains only
2542          * csum items from this inode at an offset >= new_size
2543          */
2544         ret = btrfs_del_leaf(trans, root, path, leaf_start);
2545         BUG_ON(ret);
2546
2547         if (root->ref_cows && leaf_gen < trans->transid) {
2548                 ref = btrfs_alloc_leaf_ref(root, 0);
2549                 if (ref) {
2550                         ref->root_gen = root->root_key.offset;
2551                         ref->bytenr = leaf_start;
2552                         ref->owner = 0;
2553                         ref->generation = leaf_gen;
2554                         ref->nritems = 0;
2555
2556                         btrfs_sort_leaf_ref(ref);
2557
2558                         ret = btrfs_add_leaf_ref(root, ref, 0);
2559                         WARN_ON(ret);
2560                         btrfs_free_leaf_ref(root, ref);
2561                 } else {
2562                         WARN_ON(1);
2563                 }
2564         }
2565 next_key:
2566         btrfs_release_path(root, path);
2567
2568         if (other_key.objectid == inode->i_ino &&
2569             other_key.type == key.type && other_key.offset > key.offset) {
2570                 key.offset = other_key.offset;
2571                 cond_resched();
2572                 goto again;
2573         }
2574         ret = 0;
2575 out:
2576         /* fixup any changes we've made to the path */
2577         path->lowest_level = 0;
2578         path->keep_locks = 0;
2579         btrfs_release_path(root, path);
2580         return ret;
2581 }
2582
2583 #endif
2584
2585 /*
2586  * this can truncate away extent items, csum items and directory items.
2587  * It starts at a high offset and removes keys until it can't find
2588  * any higher than new_size
2589  *
2590  * csum items that cross the new i_size are truncated to the new size
2591  * as well.
2592  *
2593  * min_type is the minimum key type to truncate down to.  If set to 0, this
2594  * will kill all the items on this inode, including the INODE_ITEM_KEY.
2595  */
2596 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2597                                         struct btrfs_root *root,
2598                                         struct inode *inode,
2599                                         u64 new_size, u32 min_type)
2600 {
2601         int ret;
2602         struct btrfs_path *path;
2603         struct btrfs_key key;
2604         struct btrfs_key found_key;
2605         u32 found_type = (u8)-1;
2606         struct extent_buffer *leaf;
2607         struct btrfs_file_extent_item *fi;
2608         u64 extent_start = 0;
2609         u64 extent_num_bytes = 0;
2610         u64 extent_offset = 0;
2611         u64 item_end = 0;
2612         int found_extent;
2613         int del_item;
2614         int pending_del_nr = 0;
2615         int pending_del_slot = 0;
2616         int extent_type = -1;
2617         int encoding;
2618         u64 mask = root->sectorsize - 1;
2619
2620         if (root->ref_cows)
2621                 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2622         path = btrfs_alloc_path();
2623         BUG_ON(!path);
2624         path->reada = -1;
2625
2626         /* FIXME, add redo link to tree so we don't leak on crash */
2627         key.objectid = inode->i_ino;
2628         key.offset = (u64)-1;
2629         key.type = (u8)-1;
2630
2631 search_again:
2632         path->leave_spinning = 1;
2633         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2634         if (ret < 0)
2635                 goto error;
2636
2637         if (ret > 0) {
2638                 /* there are no items in the tree for us to truncate, we're
2639                  * done
2640                  */
2641                 if (path->slots[0] == 0) {
2642                         ret = 0;
2643                         goto error;
2644                 }
2645                 path->slots[0]--;
2646         }
2647
2648         while (1) {
2649                 fi = NULL;
2650                 leaf = path->nodes[0];
2651                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2652                 found_type = btrfs_key_type(&found_key);
2653                 encoding = 0;
2654
2655                 if (found_key.objectid != inode->i_ino)
2656                         break;
2657
2658                 if (found_type < min_type)
2659                         break;
2660
2661                 item_end = found_key.offset;
2662                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2663                         fi = btrfs_item_ptr(leaf, path->slots[0],
2664                                             struct btrfs_file_extent_item);
2665                         extent_type = btrfs_file_extent_type(leaf, fi);
2666                         encoding = btrfs_file_extent_compression(leaf, fi);
2667                         encoding |= btrfs_file_extent_encryption(leaf, fi);
2668                         encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2669
2670                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2671                                 item_end +=
2672                                     btrfs_file_extent_num_bytes(leaf, fi);
2673                         } else {
2674                                 item_end += btrfs_file_extent_inline_len(leaf,
2675                                                                          fi);
2676                         }
2677                         item_end--;
2678                 }
2679                 if (item_end < new_size) {
2680                         if (found_type == BTRFS_DIR_ITEM_KEY)
2681                                 found_type = BTRFS_INODE_ITEM_KEY;
2682                         else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2683                                 found_type = BTRFS_EXTENT_DATA_KEY;
2684                         else if (found_type == BTRFS_EXTENT_DATA_KEY)
2685                                 found_type = BTRFS_XATTR_ITEM_KEY;
2686                         else if (found_type == BTRFS_XATTR_ITEM_KEY)
2687                                 found_type = BTRFS_INODE_REF_KEY;
2688                         else if (found_type)
2689                                 found_type--;
2690                         else
2691                                 break;
2692                         btrfs_set_key_type(&key, found_type);
2693                         goto next;
2694                 }
2695                 if (found_key.offset >= new_size)
2696                         del_item = 1;
2697                 else
2698                         del_item = 0;
2699                 found_extent = 0;
2700
2701                 /* FIXME, shrink the extent if the ref count is only 1 */
2702                 if (found_type != BTRFS_EXTENT_DATA_KEY)
2703                         goto delete;
2704
2705                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2706                         u64 num_dec;
2707                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2708                         if (!del_item && !encoding) {
2709                                 u64 orig_num_bytes =
2710                                         btrfs_file_extent_num_bytes(leaf, fi);
2711                                 extent_num_bytes = new_size -
2712                                         found_key.offset + root->sectorsize - 1;
2713                                 extent_num_bytes = extent_num_bytes &
2714                                         ~((u64)root->sectorsize - 1);
2715                                 btrfs_set_file_extent_num_bytes(leaf, fi,
2716                                                          extent_num_bytes);
2717                                 num_dec = (orig_num_bytes -
2718                                            extent_num_bytes);
2719                                 if (root->ref_cows && extent_start != 0)
2720                                         inode_sub_bytes(inode, num_dec);
2721                                 btrfs_mark_buffer_dirty(leaf);
2722                         } else {
2723                                 extent_num_bytes =
2724                                         btrfs_file_extent_disk_num_bytes(leaf,
2725                                                                          fi);
2726                                 extent_offset = found_key.offset -
2727                                         btrfs_file_extent_offset(leaf, fi);
2728
2729                                 /* FIXME blocksize != 4096 */
2730                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2731                                 if (extent_start != 0) {
2732                                         found_extent = 1;
2733                                         if (root->ref_cows)
2734                                                 inode_sub_bytes(inode, num_dec);
2735                                 }
2736                         }
2737                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2738                         /*
2739                          * we can't truncate inline items that have had
2740                          * special encodings
2741                          */
2742                         if (!del_item &&
2743                             btrfs_file_extent_compression(leaf, fi) == 0 &&
2744                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
2745                             btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2746                                 u32 size = new_size - found_key.offset;
2747
2748                                 if (root->ref_cows) {
2749                                         inode_sub_bytes(inode, item_end + 1 -
2750                                                         new_size);
2751                                 }
2752                                 size =
2753                                     btrfs_file_extent_calc_inline_size(size);
2754                                 ret = btrfs_truncate_item(trans, root, path,
2755                                                           size, 1);
2756                                 BUG_ON(ret);
2757                         } else if (root->ref_cows) {
2758                                 inode_sub_bytes(inode, item_end + 1 -
2759                                                 found_key.offset);
2760                         }
2761                 }
2762 delete:
2763                 if (del_item) {
2764                         if (!pending_del_nr) {
2765                                 /* no pending yet, add ourselves */
2766                                 pending_del_slot = path->slots[0];
2767                                 pending_del_nr = 1;
2768                         } else if (pending_del_nr &&
2769                                    path->slots[0] + 1 == pending_del_slot) {
2770                                 /* hop on the pending chunk */
2771                                 pending_del_nr++;
2772                                 pending_del_slot = path->slots[0];
2773                         } else {
2774                                 BUG();
2775                         }
2776                 } else {
2777                         break;
2778                 }
2779                 if (found_extent && root->ref_cows) {
2780                         btrfs_set_path_blocking(path);
2781                         ret = btrfs_free_extent(trans, root, extent_start,
2782                                                 extent_num_bytes, 0,
2783                                                 btrfs_header_owner(leaf),
2784                                                 inode->i_ino, extent_offset);
2785                         BUG_ON(ret);
2786                 }
2787 next:
2788                 if (path->slots[0] == 0) {
2789                         if (pending_del_nr)
2790                                 goto del_pending;
2791                         btrfs_release_path(root, path);
2792                         if (found_type == BTRFS_INODE_ITEM_KEY)
2793                                 break;
2794                         goto search_again;
2795                 }
2796
2797                 path->slots[0]--;
2798                 if (pending_del_nr &&
2799                     path->slots[0] + 1 != pending_del_slot) {
2800                         struct btrfs_key debug;
2801 del_pending:
2802                         btrfs_item_key_to_cpu(path->nodes[0], &debug,
2803                                               pending_del_slot);
2804                         ret = btrfs_del_items(trans, root, path,
2805                                               pending_del_slot,
2806                                               pending_del_nr);
2807                         BUG_ON(ret);
2808                         pending_del_nr = 0;
2809                         btrfs_release_path(root, path);
2810                         if (found_type == BTRFS_INODE_ITEM_KEY)
2811                                 break;
2812                         goto search_again;
2813                 }
2814         }
2815         ret = 0;
2816 error:
2817         if (pending_del_nr) {
2818                 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2819                                       pending_del_nr);
2820         }
2821         btrfs_free_path(path);
2822         inode->i_sb->s_dirt = 1;
2823         return ret;
2824 }
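/*
 * Usage sketch (illustrative only, mirroring btrfs_delete_inode() below):
 * passing min_type == 0 removes every item the inode owns, while a
 * regular truncate keeps the inode item and xattrs by stopping at the
 * extent data items:
 *
 *	trans = btrfs_join_transaction(root, 1);
 *	btrfs_set_trans_block_group(trans, inode);
 *	ret = btrfs_truncate_inode_items(trans, root, inode, new_size,
 *					 BTRFS_EXTENT_DATA_KEY);
 *	btrfs_end_transaction(trans, root);
 */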
2825
2826 /*
2827  * taken from block_truncate_page, but does COW as it zeros out
2828  * any bytes left in the last page in the file.
2829  */
2830 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2831 {
2832         struct inode *inode = mapping->host;
2833         struct btrfs_root *root = BTRFS_I(inode)->root;
2834         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2835         struct btrfs_ordered_extent *ordered;
2836         char *kaddr;
2837         u32 blocksize = root->sectorsize;
2838         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2839         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2840         struct page *page;
2841         int ret = 0;
2842         u64 page_start;
2843         u64 page_end;
2844
2845         if ((offset & (blocksize - 1)) == 0)
2846                 goto out;
2847
2848         ret = -ENOMEM;
2849 again:
2850         page = grab_cache_page(mapping, index);
2851         if (!page)
2852                 goto out;
2853
2854         page_start = page_offset(page);
2855         page_end = page_start + PAGE_CACHE_SIZE - 1;
2856
2857         if (!PageUptodate(page)) {
2858                 ret = btrfs_readpage(NULL, page);
2859                 lock_page(page);
2860                 if (page->mapping != mapping) {
2861                         unlock_page(page);
2862                         page_cache_release(page);
2863                         goto again;
2864                 }
2865                 if (!PageUptodate(page)) {
2866                         ret = -EIO;
2867                         goto out_unlock;
2868                 }
2869         }
2870         wait_on_page_writeback(page);
2871
2872         lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2873         set_page_extent_mapped(page);
2874
2875         ordered = btrfs_lookup_ordered_extent(inode, page_start);
2876         if (ordered) {
2877                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2878                 unlock_page(page);
2879                 page_cache_release(page);
2880                 btrfs_start_ordered_extent(inode, ordered, 1);
2881                 btrfs_put_ordered_extent(ordered);
2882                 goto again;
2883         }
2884
2885         btrfs_set_extent_delalloc(inode, page_start, page_end);
2886         ret = 0;
2887         if (offset != PAGE_CACHE_SIZE) {
2888                 kaddr = kmap(page);
2889                 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2890                 flush_dcache_page(page);
2891                 kunmap(page);
2892         }
2893         ClearPageChecked(page);
2894         set_page_dirty(page);
2895         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2896
2897 out_unlock:
2898         unlock_page(page);
2899         page_cache_release(page);
2900 out:
2901         return ret;
2902 }
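/*
 * Worked example (illustrative): with 4K pages and a 4K sectorsize,
 * truncating at from == 6000 gives offset == 6000 & 4095 == 1904, so
 * the final 4096 - 1904 == 2192 bytes of the last page are zeroed and
 * the page is marked delalloc so the zeros are written out via COW.
 */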
2903
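/*
 * expanding truncate helper: zero the tail of the old last page and
 * plug the gap between the old i_size and the new size with hole file
 * extents (disk_bytenr == 0) so reads of the range return zeros.
 */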
2904 int btrfs_cont_expand(struct inode *inode, loff_t size)
2905 {
2906         struct btrfs_trans_handle *trans;
2907         struct btrfs_root *root = BTRFS_I(inode)->root;
2908         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2909         struct extent_map *em;
2910         u64 mask = root->sectorsize - 1;
2911         u64 hole_start = (inode->i_size + mask) & ~mask;
2912         u64 block_end = (size + mask) & ~mask;
2913         u64 last_byte;
2914         u64 cur_offset;
2915         u64 hole_size;
2916         int err;
2917
2918         if (size <= hole_start)
2919                 return 0;
2920
2921         err = btrfs_check_metadata_free_space(root);
2922         if (err)
2923                 return err;
2924
2925         btrfs_truncate_page(inode->i_mapping, inode->i_size);
2926
2927         while (1) {
2928                 struct btrfs_ordered_extent *ordered;
2929                 btrfs_wait_ordered_range(inode, hole_start,
2930                                          block_end - hole_start);
2931                 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2932                 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2933                 if (!ordered)
2934                         break;
2935                 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2936                 btrfs_put_ordered_extent(ordered);
2937         }
2938
2939         trans = btrfs_start_transaction(root, 1);
2940         btrfs_set_trans_block_group(trans, inode);
2941
2942         cur_offset = hole_start;
2943         while (1) {
2944                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2945                                 block_end - cur_offset, 0);
2946                 BUG_ON(IS_ERR(em) || !em);
2947                 last_byte = min(extent_map_end(em), block_end);
2948                 last_byte = (last_byte + mask) & ~mask;
2949                 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2950                         u64 hint_byte = 0;
2951                         hole_size = last_byte - cur_offset;
2952                         err = btrfs_drop_extents(trans, root, inode,
2953                                                  cur_offset,
2954                                                  cur_offset + hole_size,
2955                                                  block_end,
2956                                                  cur_offset, &hint_byte, 1);
2957                         if (err)
2958                                 break;
2959                         err = btrfs_insert_file_extent(trans, root,
2960                                         inode->i_ino, cur_offset, 0,
2961                                         0, hole_size, 0, hole_size,
2962                                         0, 0, 0);
2963                         btrfs_drop_extent_cache(inode, hole_start,
2964                                         last_byte - 1, 0);
2965                 }
2966                 free_extent_map(em);
2967                 cur_offset = last_byte;
2968                 if (err || cur_offset >= block_end)
2969                         break;
2970         }
2971
2972         btrfs_end_transaction(trans, root);
2973         unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2974         return err;
2975 }
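/*
 * Worked example (illustrative): growing a 10240 byte file to 1M,
 * hole_start rounds up to 12288 (4K sectors) and block_end stays at
 * 1M; every VACANCY extent map in between gets a hole file extent
 * (disk_bytenr == 0) inserted, so no data blocks are allocated for
 * the hole.
 */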
2976
2977 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2978 {
2979         struct inode *inode = dentry->d_inode;
2980         int err;
2981
2982         err = inode_change_ok(inode, attr);
2983         if (err)
2984                 return err;
2985
2986         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
2987                 if (attr->ia_size > inode->i_size) {
2988                         err = btrfs_cont_expand(inode, attr->ia_size);
2989                         if (err)
2990                                 return err;
2991                 } else if (inode->i_size > 0 &&
2992                            attr->ia_size == 0) {
2993
2994                         /* we're truncating a file that used to have good
2995                          * data down to zero.  Make sure it gets into
2996                          * the ordered flush list so that any new writes
2997                          * get down to disk quickly.
2998                          */
2999                         BTRFS_I(inode)->ordered_data_close = 1;
3000                 }
3001         }
3002
3003         err = inode_setattr(inode, attr);
3004
3005         if (!err && (attr->ia_valid & ATTR_MODE))
3006                 err = btrfs_acl_chmod(inode);
3007         return err;
3008 }
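/*
 * Note (illustrative): an expanding truncate(2) reaches
 * btrfs_cont_expand() above before inode_setattr() publishes the new
 * i_size, so the zeroed tail page and the hole extents already exist
 * by the time readers can see the larger size.
 */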
3009
3010 void btrfs_delete_inode(struct inode *inode)
3011 {
3012         struct btrfs_trans_handle *trans;
3013         struct btrfs_root *root = BTRFS_I(inode)->root;
3014         unsigned long nr;
3015         int ret;
3016
3017         truncate_inode_pages(&inode->i_data, 0);
3018         if (is_bad_inode(inode)) {
3019                 btrfs_orphan_del(NULL, inode);
3020                 goto no_delete;
3021         }
3022         btrfs_wait_ordered_range(inode, 0, (u64)-1);
3023
3024         btrfs_i_size_write(inode, 0);
3025         trans = btrfs_join_transaction(root, 1);
3026
3027         btrfs_set_trans_block_group(trans, inode);
3028         ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
3029         if (ret) {
3030                 btrfs_orphan_del(NULL, inode);
3031                 goto no_delete_lock;
3032         }
3033
3034         btrfs_orphan_del(trans, inode);
3035
3036         nr = trans->blocks_used;
3037         clear_inode(inode);
3038
3039         btrfs_end_transaction(trans, root);
3040         btrfs_btree_balance_dirty(root, nr);
3041         return;
3042
3043 no_delete_lock:
3044         nr = trans->blocks_used;
3045         btrfs_end_transaction(trans, root);
3046         btrfs_btree_balance_dirty(root, nr);
3047 no_delete:
3048         clear_inode(inode);
3049 }
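/*
 * Note (illustrative): the orphan item dropped by btrfs_orphan_del()
 * is what makes this safe across a crash; if we die mid-truncate, the
 * inode is still on the orphan list and cleanup at the next mount
 * finishes removing its items.
 */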
3050
3051 /*
3052  * this looks up the dir entry for the dentry and returns its key in the
3053  * location pointer.  If no dir entry was found, location->objectid is 0.
3054  */
3055 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3056                                struct btrfs_key *location)
3057 {
3058         const char *name = dentry->d_name.name;
3059         int namelen = dentry->d_name.len;
3060         struct btrfs_dir_item *di;
3061         struct btrfs_path *path;
3062         struct btrfs_root *root = BTRFS_I(dir)->root;
3063         int ret = 0;
3064
3065         path = btrfs_alloc_path();
3066         BUG_ON(!path);
3067
3068         di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3069                                     namelen, 0);
3070         if (IS_ERR(di))
3071                 ret = PTR_ERR(di);
3072
3073         if (!di || IS_ERR(di))
3074                 goto out_err;
3075
3076         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3077 out:
3078         btrfs_free_path(path);
3079         return ret;
3080 out_err:
3081         location->objectid = 0;
3082         goto out;
3083 }
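/*
 * Usage sketch (illustrative): callers distinguish "no entry" from a
 * real error via location.objectid:
 *
 *	struct btrfs_key location;
 *	ret = btrfs_inode_by_name(dir, dentry, &location);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 *	if (location.objectid == 0)
 *		return NULL;
 */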
3084
3085 /*
3086  * when we hit a tree root in a directory, the btrfs part of the inode
3087  * needs to be changed to reflect the root directory of the tree root.  This
3088  * is kind of like crossing a mount point.
3089  */
3090 static int fixup_tree_root_location(struct btrfs_root *root,
3091                              struct btrfs_key *location,
3092                              struct btrfs_root **sub_root,
3093                              struct dentry *dentry)
3094 {
3095         struct btrfs_root_item *ri;
3096
3097         if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
3098                 return 0;
3099         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
3100                 return 0;
3101
3102         *sub_root = btrfs_read_fs_root(root->fs_info, location,
3103                                         dentry->d_name.name,
3104                                         dentry->d_name.len);
3105         if (IS_ERR(*sub_root))
3106                 return PTR_ERR(*sub_root);
3107
3108         ri = &(*sub_root)->root_item;
3109         location->objectid = btrfs_root_dirid(ri);
3110         btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3111         location->offset = 0;
3112
3113         return 0;
3114 }
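/*
 * Example (illustrative): a dir item for a subvolume stores a
 * BTRFS_ROOT_ITEM_KEY; the code above swaps in the subvolume's own
 * root and rewrites location to that tree's root dirid, so the caller
 * ends up loading the subvolume's top directory rather than an item
 * from the root tree.
 */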
3115
3116 static void inode_tree_add(struct inode *inode)
3117 {
3118         struct btrfs_root *root = BTRFS_I(inode)->root;
3119         struct btrfs_inode *entry;
3120         struct rb_node **p = &root->inode_tree.rb_node;
3121         struct rb_node *parent = NULL;
3122
3123         spin_lock(&root->inode_lock);
3124         while (*p) {
3125                 parent = *p;
3126                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
3127
3128                 if (inode->i_ino < entry->vfs_inode.i_ino)
3129                         p = &(*p)->rb_left;
3130                 else if (inode->i_ino > entry->vfs_inode.i_ino)
3131                         p = &(*p)->rb_right;
3132                 else {
3133                         WARN_ON(!(entry->vfs_inode.i_state &
3134                                   (I_WILL_FREE | I_FREEING | I_CLEAR)));
3135                         break;
3136                 }
3137         }
3138         rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3139         rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3140         spin_unlock(&root->inode_lock);
3141 }
3142
3143 static void inode_tree_del(struct inode *inode)
3144 {
3145         struct btrfs_root *root = BTRFS_I(inode)->root;
3146
3147         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3148                 spin_lock(&root->inode_lock);
3149                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3150                 spin_unlock(&root->inode_lock);
3151                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3152         }
3153 }
3154
3155 static noinline void init_btrfs_i(struct inode *inode)
3156 {
3157         struct btrfs_inode *bi = BTRFS_I(inode);
3158
3159         bi->i_acl = BTRFS_ACL_NOT_CACHED;
3160         bi->i_default_acl = BTRFS_ACL_NOT_CACHED;
3161
3162         bi->generation = 0;
3163         bi->sequence = 0;
3164         bi->last_trans = 0;
3165         bi->logged_trans = 0;
3166         bi->delalloc_bytes = 0;
3167         bi->reserved_bytes = 0;
3168         bi->disk_i_size = 0;
3169         bi->flags = 0;
3170         bi->index_cnt = (u64)-1;
3171         bi->last_unlink_trans = 0;
3172         bi->ordered_data_close = 0;
3173         extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3174         extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3175                              inode->i_mapping, GFP_NOFS);
3176         extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3177                              inode->i_mapping, GFP_NOFS);
3178         INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3179         INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3180         RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3181         btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3182         mutex_init(&BTRFS_I(inode)->extent_mutex);
3183         mutex_init(&BTRFS_I(inode)->log_mutex);
3184 }
3185
3186 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3187 {
3188         struct btrfs_iget_args *args = p;
3189         inode->i_ino = args->ino;
3190         init_btrfs_i(inode);
3191         BTRFS_I(inode)->root = args->root;
3192         btrfs_set_inode_space_info(args->root, inode);
3193         return 0;
3194 }
3195
3196 static int btrfs_find_actor(struct inode *inode, void *opaque)
3197 {
3198         struct btrfs_iget_args *args = opaque;
3199         return args->ino == inode->i_ino &&
3200                 args->root == BTRFS_I(inode)->root;
3201 }
3202
3203 static struct inode *btrfs_iget_locked(struct super_block *s,
3204                                        u64 objectid,
3205                                        struct btrfs_root *root)
3206 {
3207         struct inode *inode;
3208         struct btrfs_iget_args args;
3209         args.ino = objectid;
3210         args.root = root;
3211
3212         inode = iget5_locked(s, objectid, btrfs_find_actor,
3213                              btrfs_init_locked_inode,
3214                              (void *)&args);
3215         return inode;
3216 }
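/*
 * Usage sketch (illustrative): as with any iget5_locked() wrapper, a
 * caller must finish filling in inodes that come back I_NEW:
 *
 *	inode = btrfs_iget_locked(s, location->objectid, root);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		btrfs_read_locked_inode(inode);
 *		unlock_new_inode(inode);
 *	}
 */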
3217
3218 /* Get an inode object given its location and corresponding root.
3219  * Returns in *is_new if the inode was read from disk