/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"


static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                                   unsigned int from, unsigned int to)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int start, end;

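        /*
         * Walk the page's circular buffer list exactly once: on the first
         * pass bh == head but start is still zero, so the loop only stops
         * after we have wrapped back around to head. Only buffers that
         * overlap [from, to) are added to the current transaction.
         */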
        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from || start >= to)
                        continue;
                if (gfs2_is_jdata(ip))
                        set_buffer_uptodate(bh);
                gfs2_trans_add_data(ip->i_gl, bh);
        }
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -EIO;
        return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
                                 struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the page can be written, otherwise an error code, or zero
 *          if the page was handled without error.
 */

static int gfs2_writepage_common(struct page *page,
                                 struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
        if (current->journal_info)
                goto redirty;
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
                page->mapping->a_ops->invalidatepage(page, 0);
                goto out;
        }
        return 1;
redirty:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writeback_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writeback_writepage(struct page *page,
                                    struct writeback_control *wbc)
{
        int ret;

        ret = gfs2_writepage_common(page, wbc);
        if (ret <= 0)
                return ret;

        return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_ordered_writepage - Write page for ordered data files
 * @page: The page to write
 * @wbc: The writeback control
 *
 */

static int gfs2_ordered_writepage(struct page *page,
                                  struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        int ret;

        ret = gfs2_writepage_common(page, wbc);
        if (ret <= 0)
                return ret;

        if (!page_has_buffers(page)) {
                create_empty_buffers(page, inode->i_sb->s_blocksize,
                                     (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
        gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
        return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (PageChecked(page)) {
                ClearPageChecked(page);
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             (1 << BH_Dirty)|(1 << BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
        }
        return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int ret;
        int done_trans = 0;

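        /*
         * A checked page still needs its data journaled, which requires a
         * transaction. For lazy (WB_SYNC_NONE) writeback just redirty the
         * page and leave it for a later sync pass instead of blocking here.
         */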
        if (PageChecked(page)) {
                if (wbc->sync_mode != WB_SYNC_ALL)
                        goto out_ignore;
                ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
                if (ret)
                        goto out_ignore;
                done_trans = 1;
        }
        ret = gfs2_writepage_common(page, wbc);
        if (ret > 0)
                ret = __gfs2_jdata_writepage(page, wbc);
        if (done_trans)
                gfs2_trans_end(sdp);
        return ret;

out_ignore:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: The last page index to consider when not doing cyclic writeback
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
                                    struct writeback_control *wbc,
                                    struct pagevec *pvec,
                                    int nr_pages, pgoff_t end)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
        unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
        int i;
        int ret;

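        /*
         * One transaction covers the whole pagevec: reserve enough journal
         * space for every file system block in these pages before any of
         * the page locks are taken.
         */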
        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
                return ret;

        for(i = 0; i < nr_pages; i++) {
                struct page *page = pvec->pages[i];

                lock_page(page);

                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        continue;
                }

                if (!wbc->range_cyclic && page->index > end) {
                        ret = 1;
                        unlock_page(page);
                        continue;
                }

                if (wbc->sync_mode != WB_SYNC_NONE)
                        wait_on_page_writeback(page);

                if (PageWriteback(page) ||
                    !clear_page_dirty_for_io(page)) {
                        unlock_page(page);
                        continue;
                }

                /* Is the page fully outside i_size? (truncate in progress) */
                if (page->index > end_index || (page->index == end_index && !offset)) {
                        page->mapping->a_ops->invalidatepage(page, 0);
                        unlock_page(page);
                        continue;
                }

                ret = __gfs2_jdata_writepage(page, wbc);

                if (ret || (--(wbc->nr_to_write) <= 0))
                        ret = 1;
        }
        gfs2_trans_end(sdp);
        return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
        pgoff_t end;
        int scanned = 0;
        int range_whole = 0;

        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                scanned = 1;
        }

retry:
        while (!done && (index <= end) &&
               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                              PAGECACHE_TAG_DIRTY,
                                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                scanned = 1;
                ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;

                pagevec_release(&pvec);
                cond_resched();
        }

        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = 1;
                index = 0;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
        return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        int ret;

        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        u64 dsize = i_size_read(&ip->i_inode);
        void *kaddr;
        int error;

        /*
         * Due to the order of unstuffing files and ->fault(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                zero_user(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

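        /*
         * Copy the inline data that follows the on-disk dinode into the
         * page, then zero the remainder of the page.
         */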
        kaddr = kmap_atomic(page);
        if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
                dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
        memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
        kunmap_atomic(kaddr);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code, since in that case we already hold the glock. It is also
 * called by gfs2_readpage() once the required lock has been granted.
 *
 */

static int __gfs2_readpage(void *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        int error;

        if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else {
                error = mpage_readpage(page, gfs2_block_map);
        }

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder gh;
        int error;

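        /*
         * The VFS hands us a locked page, but the glock must be acquired
         * before the page lock, so drop the page lock, take the glock and
         * then relock the page. AOP_TRUNCATED_PAGE tells the caller to look
         * the page up again if its mapping changed while it was unlocked.
         */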
        unlock_page(page);
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);
        if (unlikely(error))
                goto out;
        error = AOP_TRUNCATED_PAGE;
        lock_page(page);
        if (page->mapping == mapping && !PageUptodate(page))
                error = __gfs2_readpage(file, page);
        else
                unlock_page(page);
        gfs2_glock_dq(&gh);
out:
        gfs2_holder_uninit(&gh);
        if (error && error != AOP_TRUNCATED_PAGE)
                lock_page(page);
        return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos / PAGE_CACHE_SIZE;
        unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
        unsigned copied = 0;
        unsigned amt;
        struct page *page;
        void *p;

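        /*
         * Copy the request one page at a time through the page cache.
         * __gfs2_readpage() can be called directly because internal
         * callers already hold the inode glock.
         */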
        do {
                amt = size - copied;
                if (offset + size > PAGE_CACHE_SIZE)
                        amt = PAGE_CACHE_SIZE - offset;
                page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
                p = kmap_atomic(page);
                memcpy(buf + copied, p + offset, amt);
                kunmap_atomic(p);
                mark_page_accessed(page);
                page_cache_release(page);
                copied += amt;
                index++;
                offset = 0;
        } while(copied < size);
        (*pos) += size;
        return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        int ret;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (unlikely(ret))
                goto out_uninit;
        if (!gfs2_is_stuffed(ip))
                ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = -EIO;
        return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        unsigned requested = 0;
        int alloc_required;
        int error = 0;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        struct page *page;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
        error = gfs2_glock_nq(&ip->i_gh);
        if (unlikely(error))
                goto out_uninit;
        if (&ip->i_inode == sdp->sd_rindex) {
                error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &m_ip->i_gh);
                if (unlikely(error)) {
                        gfs2_glock_dq(&ip->i_gh);
                        goto out_uninit;
                }
        }

        alloc_required = gfs2_write_alloc_required(ip, pos, len);

        if (alloc_required || gfs2_is_jdata(ip))
                gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

        if (alloc_required) {
                error = gfs2_quota_lock_check(ip);
                if (error)
                        goto out_unlock;

                requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip, requested, 0);
                if (error)
                        goto out_qunlock;
        }

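        /*
         * Work out how many journal blocks this transaction may dirty:
         * the dinode, any indirect blocks, the data blocks themselves for
         * jdata inodes, statfs/quota changes when allocating, and the
         * resource group bitmaps covering a new allocation.
         */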
        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;
        if (&ip->i_inode == sdp->sd_rindex)
                rblocks += 2 * RES_STATFS;
        if (alloc_required)
                rblocks += gfs2_rg_blocks(ip, requested);

        error = gfs2_trans_begin(sdp, rblocks,
                                 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
        if (error)
                goto out_trans_fail;

        error = -ENOMEM;
        flags |= AOP_FLAG_NOFS;
        page = grab_cache_page_write_begin(mapping, index, flags);
        *pagep = page;
        if (unlikely(!page))
                goto out_endtrans;

        if (gfs2_is_stuffed(ip)) {
                error = 0;
                if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page)) {
                        error = stuffed_readpage(ip, page);
                }
                goto out;
        }

prepare_write:
        error = __block_write_begin(page, from, len, gfs2_block_map);
out:
        if (error == 0)
                return 0;

        unlock_page(page);
        page_cache_release(page);

        gfs2_trans_end(sdp);
        if (pos + len > ip->i_inode.i_size)
                gfs2_trim_blocks(&ip->i_inode);
        goto out_trans_fail;

out_endtrans:
        gfs2_trans_end(sdp);
out_trans_fail:
        if (alloc_required) {
                gfs2_inplace_release(ip);
out_qunlock:
                gfs2_quota_unlock(ip);
        }
out_unlock:
        if (&ip->i_inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
out_uninit:
        gfs2_holder_uninit(&ip->i_gh);
        return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh, *l_bh;
        u64 fs_total, new_free;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);
        if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
                return;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);

        if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
                goto out;
        update_statfs(sdp, m_bh, l_bh);
        brelse(l_bh);
out:
        brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: The number of bytes copied
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        u64 to = pos + copied;
        void *kaddr;
        unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

        BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
        kaddr = kmap_atomic(page);
        memcpy(buf + pos, kaddr + pos, copied);
        memset(kaddr + pos + copied, 0, len - copied);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);

        if (!PageUptodate(page))
                SetPageUptodate(page);
        unlock_page(page);
        page_cache_release(page);

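        /*
         * If anything was copied the stuffed file may have grown, so push
         * the new size into the VFS inode before the dinode is written back.
         */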
        if (copied) {
                if (inode->i_size < to)
                        i_size_write(inode, to);
                mark_inode_dirty(inode);
        }

        if (inode == sdp->sd_rindex) {
                adjust_fs_space(inode);
                sdp->sd_rindex_uptodate = 0;
        }

        brelse(dibh);
        gfs2_trans_end(sdp);
        if (inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: The number of bytes copied, or errno on failure
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct buffer_head *dibh;
        unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
        unsigned int to = from + len;
        int ret;

        BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

        ret = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(ret)) {
                unlock_page(page);
                page_cache_release(page);
                goto failed;
        }

        gfs2_trans_add_meta(ip->i_gl, dibh);

        if (gfs2_is_stuffed(ip))
                return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

        if (!gfs2_is_writeback(ip))
                gfs2_page_add_databufs(ip, page, from, to);

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

        if (inode == sdp->sd_rindex) {
                adjust_fs_space(inode);
                sdp->sd_rindex_uptodate = 0;
        }

        brelse(dibh);
failed:
        gfs2_trans_end(sdp);
        gfs2_inplace_release(ip);
        if (ip->i_res->rs_qa_qd_num)
                gfs2_quota_unlock(ip);
        if (inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
        SetPageChecked(page);
        return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_list);
                else
                        gfs2_remove_from_journal(bh, current->journal_info, 0);
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!PageLocked(page));
        if (offset == 0)
                ClearPageChecked(page);
        if (!page_has_buffers(page))
                goto out;

        bh = head = page_buffers(page);
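        /*
         * Throw away every buffer that starts at or beyond the
         * invalidation offset; buffers before it are left alone.
         */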
        do {
                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (offset == 0)
                try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a stuffed file makes any sense. For now we'll silently fall
         * back to buffered I/O
         */
        if (gfs2_is_stuffed(ip))
                return 0;

        if (offset >= i_size_read(&ip->i_inode))
                return 0;
        return 1;
}


static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int rv;

        /*
         * Deferred lock, even if it's a write, since we do no allocation
         * on this path. All we need change is atime, and this lock mode
         * ensures that other nodes have flushed their buffered read caches
         * (i.e. their page cache entries for this inode). We do not,
         * unfortunately, have the option of only flushing a range like
         * the VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
        rv = gfs2_glock_nq(&gh);
        if (rv)
                return rv;
        rv = gfs2_ok_for_dio(ip, rw, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */

        rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs, gfs2_get_block_direct,
                                  NULL, NULL, 0);
out:
        gfs2_glock_dq(&gh);
        gfs2_holder_uninit(&gh);
        return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        if (!page_has_buffers(page))
                return 0;

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_ail)
                        goto cannot_release;
                if (buffer_pinned(bh) || buffer_dirty(bh))
                        goto not_possible;
                bh = bh->b_this_page;
        } while(bh != head);
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

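        /*
         * All buffers passed the checks above, so it is now safe to detach
         * each buffer's gfs2_bufdata and free it before asking the VM to
         * release the buffers themselves.
         */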
        head = bh = page_buffers(page);
        do {
                gfs2_log_lock(sdp);
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        if (!list_empty(&bd->bd_list)) {
                                if (!buffer_pinned(bh))
                                        list_del_init(&bd->bd_list);
                                else
                                        bd = NULL;
                        }
                        if (bd)
                                bd->bd_bh = NULL;
                        bh->b_private = NULL;
                }
                gfs2_log_unlock(sdp);
                if (bd)
                        kmem_cache_free(gfs2_bufdata_cachep, bd);

                bh = bh->b_this_page;
        } while (bh != head);

        return try_to_free_buffers(page);

not_possible: /* Should never happen */
        WARN_ON(buffer_dirty(bh));
        WARN_ON(buffer_pinned(bh));
cannot_release:
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
        return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
        .writepage = gfs2_writeback_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
        .writepage = gfs2_ordered_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
        .writepage = gfs2_jdata_writepage,
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_writeback(ip))
                inode->i_mapping->a_ops = &gfs2_writeback_aops;
        else if (gfs2_is_ordered(ip))
                inode->i_mapping->a_ops = &gfs2_ordered_aops;
        else if (gfs2_is_jdata(ip))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                BUG();
}