drivers/md/raid1.c  [~shefty/rdma-dev.git, commit 6df0c41578afc4457f8bed383f64450c0cd5bf41]
1 /*
2  * raid1.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
5  *
6  * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
7  *
8  * RAID-1 management functions.
9  *
10  * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
11  *
12  * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
13  * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
14  *
15  * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
16  * bitmapped intelligence in resync:
17  *
18  *      - bitmap marked during normal i/o
19  *      - bitmap used to skip nondirty blocks during sync
20  *
21  * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
22  * - persistent bitmap code
23  *
24  * This program is free software; you can redistribute it and/or modify
25  * it under the terms of the GNU General Public License as published by
26  * the Free Software Foundation; either version 2, or (at your option)
27  * any later version.
28  *
29  * You should have received a copy of the GNU General Public License
30  * (for example /usr/src/linux/COPYING); if not, write to the Free
31  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32  */
33
34 #include <linux/slab.h>
35 #include <linux/delay.h>
36 #include <linux/blkdev.h>
37 #include <linux/module.h>
38 #include <linux/seq_file.h>
39 #include <linux/ratelimit.h>
40 #include "md.h"
41 #include "raid1.h"
42 #include "bitmap.h"
43
44 /*
45  * Number of guaranteed r1bios in case of extreme VM load:
46  */
47 #define NR_RAID1_BIOS 256
48
49 /* When there are this many requests queued to be written by
50  * the raid1 thread, we become 'congested' to provide back-pressure
51  * for writeback.
52  */
53 static int max_queued_requests = 1024;
54
55 static void allow_barrier(struct r1conf *conf);
56 static void lower_barrier(struct r1conf *conf);
57
58 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
59 {
60         struct pool_info *pi = data;
61         int size = offsetof(struct r1bio, bios[pi->raid_disks]);
62
63         /* allocate a r1bio with room for raid_disks entries in the bios array */
64         return kzalloc(size, gfp_flags);
65 }
66
67 static void r1bio_pool_free(void *r1_bio, void *data)
68 {
69         kfree(r1_bio);
70 }
71
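/*
 * Geometry of the resync buffers: each r1buf carries one RESYNC_BLOCK_SIZE
 * chunk of data, spread over RESYNC_PAGES pages (RESYNC_SECTORS in 512-byte
 * sectors).
 */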
72 #define RESYNC_BLOCK_SIZE (64*1024)
73 //#define RESYNC_BLOCK_SIZE PAGE_SIZE
74 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
75 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
76 #define RESYNC_WINDOW (2048*1024)
77
78 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
79 {
80         struct pool_info *pi = data;
81         struct page *page;
82         struct r1bio *r1_bio;
83         struct bio *bio;
84         int i, j;
85
86         r1_bio = r1bio_pool_alloc(gfp_flags, pi);
87         if (!r1_bio)
88                 return NULL;
89
90         /*
91          * Allocate bios: 1 for reading, n-1 for writing
92          */
93         for (j = pi->raid_disks ; j-- ; ) {
94                 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
95                 if (!bio)
96                         goto out_free_bio;
97                 r1_bio->bios[j] = bio;
98         }
99         /*
100          * Allocate RESYNC_PAGES data pages and attach them to
101          * the first bio.
102          * If this is a user-requested check/repair, allocate
103          * RESYNC_PAGES for each bio.
104          */
105         if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
106                 j = pi->raid_disks;
107         else
108                 j = 1;
109         while(j--) {
110                 bio = r1_bio->bios[j];
111                 for (i = 0; i < RESYNC_PAGES; i++) {
112                         page = alloc_page(gfp_flags);
113                         if (unlikely(!page))
114                                 goto out_free_pages;
115
116                         bio->bi_io_vec[i].bv_page = page;
117                         bio->bi_vcnt = i+1;
118                 }
119         }
120         /* If not user-requested, copy the page pointers to all bios */
121         if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
122                 for (i=0; i<RESYNC_PAGES ; i++)
123                         for (j=1; j<pi->raid_disks; j++)
124                                 r1_bio->bios[j]->bi_io_vec[i].bv_page =
125                                         r1_bio->bios[0]->bi_io_vec[i].bv_page;
126         }
127
128         r1_bio->master_bio = NULL;
129
130         return r1_bio;
131
132 out_free_pages:
133         for (j=0 ; j < pi->raid_disks; j++)
134                 for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
135                         put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
136         j = -1;
137 out_free_bio:
138         while (++j < pi->raid_disks)
139                 bio_put(r1_bio->bios[j]);
140         r1bio_pool_free(r1_bio, data);
141         return NULL;
142 }
143
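/*
 * Free a resync r1bio: release each data page exactly once (the pages may be
 * shared with bios[0]), drop every bio, then free the r1bio itself.
 */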
144 static void r1buf_pool_free(void *__r1_bio, void *data)
145 {
146         struct pool_info *pi = data;
147         int i,j;
148         struct r1bio *r1bio = __r1_bio;
149
150         for (i = 0; i < RESYNC_PAGES; i++)
151                 for (j = pi->raid_disks; j-- ;) {
152                         if (j == 0 ||
153                             r1bio->bios[j]->bi_io_vec[i].bv_page !=
154                             r1bio->bios[0]->bi_io_vec[i].bv_page)
155                                 safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
156                 }
157         for (i=0 ; i < pi->raid_disks; i++)
158                 bio_put(r1bio->bios[i]);
159
160         r1bio_pool_free(r1bio, data);
161 }
162
163 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
164 {
165         int i;
166
167         for (i = 0; i < conf->raid_disks * 2; i++) {
168                 struct bio **bio = r1_bio->bios + i;
169                 if (!BIO_SPECIAL(*bio))
170                         bio_put(*bio);
171                 *bio = NULL;
172         }
173 }
174
175 static void free_r1bio(struct r1bio *r1_bio)
176 {
177         struct r1conf *conf = r1_bio->mddev->private;
178
179         put_all_bios(conf, r1_bio);
180         mempool_free(r1_bio, conf->r1bio_pool);
181 }
182
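/*
 * Return a resync/recovery r1bio to the buffer pool, dropping the rdev
 * references it holds and lowering the resync barrier.
 */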
183 static void put_buf(struct r1bio *r1_bio)
184 {
185         struct r1conf *conf = r1_bio->mddev->private;
186         int i;
187
188         for (i = 0; i < conf->raid_disks * 2; i++) {
189                 struct bio *bio = r1_bio->bios[i];
190                 if (bio->bi_end_io)
191                         rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
192         }
193
194         mempool_free(r1_bio, conf->r1buf_pool);
195
196         lower_barrier(conf);
197 }
198
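/*
 * Queue an r1bio on the retry list for raid1d to handle and wake the thread.
 */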
199 static void reschedule_retry(struct r1bio *r1_bio)
200 {
201         unsigned long flags;
202         struct mddev *mddev = r1_bio->mddev;
203         struct r1conf *conf = mddev->private;
204
205         spin_lock_irqsave(&conf->device_lock, flags);
206         list_add(&r1_bio->retry_list, &conf->retry_list);
207         conf->nr_queued ++;
208         spin_unlock_irqrestore(&conf->device_lock, flags);
209
210         wake_up(&conf->wait_barrier);
211         md_wakeup_thread(mddev->thread);
212 }
213
214 /*
215  * raid_end_bio_io() is called when we have finished servicing a mirrored
216  * operation and are ready to return a success/failure code to the buffer
217  * cache layer.
218  */
219 static void call_bio_endio(struct r1bio *r1_bio)
220 {
221         struct bio *bio = r1_bio->master_bio;
222         int done;
223         struct r1conf *conf = r1_bio->mddev->private;
224
225         if (bio->bi_phys_segments) {
226                 unsigned long flags;
227                 spin_lock_irqsave(&conf->device_lock, flags);
228                 bio->bi_phys_segments--;
229                 done = (bio->bi_phys_segments == 0);
230                 spin_unlock_irqrestore(&conf->device_lock, flags);
231         } else
232                 done = 1;
233
234         if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
235                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
236         if (done) {
237                 bio_endio(bio, 0);
238                 /*
239                  * Wake up any possible resync thread that waits for the device
240                  * to go idle.
241                  */
242                 allow_barrier(conf);
243         }
244 }
245
246 static void raid_end_bio_io(struct r1bio *r1_bio)
247 {
248         struct bio *bio = r1_bio->master_bio;
249
250         /* if nobody has done the final endio yet, do it now */
251         if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
252                 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
253                          (bio_data_dir(bio) == WRITE) ? "write" : "read",
254                          (unsigned long long) bio->bi_sector,
255                          (unsigned long long) bio->bi_sector +
256                          (bio->bi_size >> 9) - 1);
257
258                 call_bio_endio(r1_bio);
259         }
260         free_r1bio(r1_bio);
261 }
262
263 /*
264  * Update disk head position estimator based on IRQ completion info.
265  */
266 static inline void update_head_pos(int disk, struct r1bio *r1_bio)
267 {
268         struct r1conf *conf = r1_bio->mddev->private;
269
270         conf->mirrors[disk].head_position =
271                 r1_bio->sector + (r1_bio->sectors);
272 }
273
274 /*
275  * Find the disk number which triggered given bio
276  */
277 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
278 {
279         int mirror;
280         struct r1conf *conf = r1_bio->mddev->private;
281         int raid_disks = conf->raid_disks;
282
283         for (mirror = 0; mirror < raid_disks * 2; mirror++)
284                 if (r1_bio->bios[mirror] == bio)
285                         break;
286
287         BUG_ON(mirror == raid_disks * 2);
288         update_head_pos(mirror, r1_bio);
289
290         return mirror;
291 }
292
293 static void raid1_end_read_request(struct bio *bio, int error)
294 {
295         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
296         struct r1bio *r1_bio = bio->bi_private;
297         int mirror;
298         struct r1conf *conf = r1_bio->mddev->private;
299
300         mirror = r1_bio->read_disk;
301         /*
302          * this branch is our 'one mirror IO has finished' event handler:
303          */
304         update_head_pos(mirror, r1_bio);
305
306         if (uptodate)
307                 set_bit(R1BIO_Uptodate, &r1_bio->state);
308         else {
309                 /* If all other devices have failed, we want to return
310                  * the error upwards rather than fail the last device.
311                  * Here we redefine "uptodate" to mean "Don't want to retry"
312                  */
313                 unsigned long flags;
314                 spin_lock_irqsave(&conf->device_lock, flags);
315                 if (r1_bio->mddev->degraded == conf->raid_disks ||
316                     (r1_bio->mddev->degraded == conf->raid_disks-1 &&
317                      !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
318                         uptodate = 1;
319                 spin_unlock_irqrestore(&conf->device_lock, flags);
320         }
321
322         if (uptodate)
323                 raid_end_bio_io(r1_bio);
324         else {
325                 /*
326                  * oops, read error:
327                  */
328                 char b[BDEVNAME_SIZE];
329                 printk_ratelimited(
330                         KERN_ERR "md/raid1:%s: %s: "
331                         "rescheduling sector %llu\n",
332                         mdname(conf->mddev),
333                         bdevname(conf->mirrors[mirror].rdev->bdev,
334                                  b),
335                         (unsigned long long)r1_bio->sector);
336                 set_bit(R1BIO_ReadError, &r1_bio->state);
337                 reschedule_retry(r1_bio);
338         }
339
340         rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
341 }
342
343 static void close_write(struct r1bio *r1_bio)
344 {
345         /* it really is the end of this request */
346         if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
347                 /* free extra copy of the data pages */
348                 int i = r1_bio->behind_page_count;
349                 while (i--)
350                         safe_put_page(r1_bio->behind_bvecs[i].bv_page);
351                 kfree(r1_bio->behind_bvecs);
352                 r1_bio->behind_bvecs = NULL;
353         }
354         /* clear the bitmap if all writes complete successfully */
355         bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
356                         r1_bio->sectors,
357                         !test_bit(R1BIO_Degraded, &r1_bio->state),
358                         test_bit(R1BIO_BehindIO, &r1_bio->state));
359         md_write_end(r1_bio->mddev);
360 }
361
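/*
 * Called as each mirrored write completes; when the last one finishes,
 * either hand the r1bio to raid1d (write error or made-good blocks to
 * record) or complete the master bio.
 */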
362 static void r1_bio_write_done(struct r1bio *r1_bio)
363 {
364         if (!atomic_dec_and_test(&r1_bio->remaining))
365                 return;
366
367         if (test_bit(R1BIO_WriteError, &r1_bio->state))
368                 reschedule_retry(r1_bio);
369         else {
370                 close_write(r1_bio);
371                 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
372                         reschedule_retry(r1_bio);
373                 else
374                         raid_end_bio_io(r1_bio);
375         }
376 }
377
378 static void raid1_end_write_request(struct bio *bio, int error)
379 {
380         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
381         struct r1bio *r1_bio = bio->bi_private;
382         int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
383         struct r1conf *conf = r1_bio->mddev->private;
384         struct bio *to_put = NULL;
385
386         mirror = find_bio_disk(r1_bio, bio);
387
388         /*
389          * 'one mirror IO has finished' event handler:
390          */
391         if (!uptodate) {
392                 set_bit(WriteErrorSeen,
393                         &conf->mirrors[mirror].rdev->flags);
394                 set_bit(R1BIO_WriteError, &r1_bio->state);
395         } else {
396                 /*
397                  * Set R1BIO_Uptodate in our master bio, so that we
398                  * will return a good error code to the higher
399                  * levels even if IO on some other mirrored buffer
400                  * fails.
401                  *
402                  * The 'master' represents the composite IO operation
403                  * to user-side. So if something waits for IO, then it
404                  * will wait for the 'master' bio.
405                  */
406                 sector_t first_bad;
407                 int bad_sectors;
408
409                 r1_bio->bios[mirror] = NULL;
410                 to_put = bio;
411                 set_bit(R1BIO_Uptodate, &r1_bio->state);
412
413                 /* Maybe we can clear some bad blocks. */
414                 if (is_badblock(conf->mirrors[mirror].rdev,
415                                 r1_bio->sector, r1_bio->sectors,
416                                 &first_bad, &bad_sectors)) {
417                         r1_bio->bios[mirror] = IO_MADE_GOOD;
418                         set_bit(R1BIO_MadeGood, &r1_bio->state);
419                 }
420         }
421
422         if (behind) {
423                 if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
424                         atomic_dec(&r1_bio->behind_remaining);
425
426                 /*
427                  * In behind mode, we ACK the master bio once the I/O
428                  * has safely reached all non-writemostly
429                  * disks. Setting the Returned bit ensures that this
430                  * gets done only once -- we don't ever want to return
431                  * -EIO here, instead we'll wait
432                  */
433                 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
434                     test_bit(R1BIO_Uptodate, &r1_bio->state)) {
435                         /* Maybe we can return now */
436                         if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
437                                 struct bio *mbio = r1_bio->master_bio;
438                                 pr_debug("raid1: behind end write sectors"
439                                          " %llu-%llu\n",
440                                          (unsigned long long) mbio->bi_sector,
441                                          (unsigned long long) mbio->bi_sector +
442                                          (mbio->bi_size >> 9) - 1);
443                                 call_bio_endio(r1_bio);
444                         }
445                 }
446         }
447         if (r1_bio->bios[mirror] == NULL)
448                 rdev_dec_pending(conf->mirrors[mirror].rdev,
449                                  conf->mddev);
450
451         /*
452          * Let's see if all mirrored write operations have finished
453          * already.
454          */
455         r1_bio_write_done(r1_bio);
456
457         if (to_put)
458                 bio_put(to_put);
459 }
460
461
462 /*
463  * This routine returns the disk from which the requested read should
464  * be done. There is a per-array 'next expected sequential IO' sector
465  * number - if this matches on the next IO then we use the last disk.
466  * There is also a per-disk 'last known head position' sector that is
467  * maintained from IRQ contexts, both the normal and the resync IO
468  * completion handlers update this position correctly. If there is no
469  * perfect sequential match then we pick the disk whose head is closest.
470  *
471  * If there are 2 mirrors in the same 2 devices, performance degrades
472  * because position is mirror, not device based.
473  *
474  * The rdev for the device selected will have nr_pending incremented.
475  */
476 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
477 {
478         const sector_t this_sector = r1_bio->sector;
479         int sectors;
480         int best_good_sectors;
481         int start_disk;
482         int best_disk;
483         int i;
484         sector_t best_dist;
485         struct md_rdev *rdev;
486         int choose_first;
487
488         rcu_read_lock();
489         /*
490          * Check if we can balance. We can balance on the whole
491          * device if no resync is going on, or below the resync window.
492          * We take the first readable disk when above the resync window.
493          */
494  retry:
495         sectors = r1_bio->sectors;
496         best_disk = -1;
497         best_dist = MaxSector;
498         best_good_sectors = 0;
499
500         if (conf->mddev->recovery_cp < MaxSector &&
501             (this_sector + sectors >= conf->next_resync)) {
502                 choose_first = 1;
503                 start_disk = 0;
504         } else {
505                 choose_first = 0;
506                 start_disk = conf->last_used;
507         }
508
509         for (i = 0 ; i < conf->raid_disks * 2 ; i++) {
510                 sector_t dist;
511                 sector_t first_bad;
512                 int bad_sectors;
513
514                 int disk = start_disk + i;
515                 if (disk >= conf->raid_disks)
516                         disk -= conf->raid_disks;
517
518                 rdev = rcu_dereference(conf->mirrors[disk].rdev);
519                 if (r1_bio->bios[disk] == IO_BLOCKED
520                     || rdev == NULL
521                     || test_bit(Faulty, &rdev->flags))
522                         continue;
523                 if (!test_bit(In_sync, &rdev->flags) &&
524                     rdev->recovery_offset < this_sector + sectors)
525                         continue;
526                 if (test_bit(WriteMostly, &rdev->flags)) {
527                         /* Don't balance among write-mostly, just
528                          * use the first as a last resort */
529                         if (best_disk < 0)
530                                 best_disk = disk;
531                         continue;
532                 }
533                 /* This is a reasonable device to use.  It might
534                  * even be best.
535                  */
536                 if (is_badblock(rdev, this_sector, sectors,
537                                 &first_bad, &bad_sectors)) {
538                         if (best_dist < MaxSector)
539                                 /* already have a better device */
540                                 continue;
541                         if (first_bad <= this_sector) {
542                                 /* cannot read here. If this is the 'primary'
543                                  * device, then we must not read beyond
544                                  * bad_sectors from another device..
545                                  */
546                                 bad_sectors -= (this_sector - first_bad);
547                                 if (choose_first && sectors > bad_sectors)
548                                         sectors = bad_sectors;
549                                 if (best_good_sectors > sectors)
550                                         best_good_sectors = sectors;
551
552                         } else {
553                                 sector_t good_sectors = first_bad - this_sector;
554                                 if (good_sectors > best_good_sectors) {
555                                         best_good_sectors = good_sectors;
556                                         best_disk = disk;
557                                 }
558                                 if (choose_first)
559                                         break;
560                         }
561                         continue;
562                 } else
563                         best_good_sectors = sectors;
564
565                 dist = abs(this_sector - conf->mirrors[disk].head_position);
566                 if (choose_first
567                     /* Don't change to another disk for sequential reads */
568                     || conf->next_seq_sect == this_sector
569                     || dist == 0
570                     /* If device is idle, use it */
571                     || atomic_read(&rdev->nr_pending) == 0) {
572                         best_disk = disk;
573                         break;
574                 }
575                 if (dist < best_dist) {
576                         best_dist = dist;
577                         best_disk = disk;
578                 }
579         }
580
581         if (best_disk >= 0) {
582                 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
583                 if (!rdev)
584                         goto retry;
585                 atomic_inc(&rdev->nr_pending);
586                 if (test_bit(Faulty, &rdev->flags)) {
587                         /* cannot risk returning a device that failed
588                          * before we inc'ed nr_pending
589                          */
590                         rdev_dec_pending(rdev, conf->mddev);
591                         goto retry;
592                 }
593                 sectors = best_good_sectors;
594                 conf->next_seq_sect = this_sector + sectors;
595                 conf->last_used = best_disk;
596         }
597         rcu_read_unlock();
598         *max_sectors = sectors;
599
600         return best_disk;
601 }
602
603 int md_raid1_congested(struct mddev *mddev, int bits)
604 {
605         struct r1conf *conf = mddev->private;
606         int i, ret = 0;
607
608         if ((bits & (1 << BDI_async_congested)) &&
609             conf->pending_count >= max_queued_requests)
610                 return 1;
611
612         rcu_read_lock();
613         for (i = 0; i < conf->raid_disks; i++) {
614                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
615                 if (rdev && !test_bit(Faulty, &rdev->flags)) {
616                         struct request_queue *q = bdev_get_queue(rdev->bdev);
617
618                         BUG_ON(!q);
619
620                         /* Note the '|| 1' - when read_balance prefers
621                          * non-congested targets, it can be removed
622                          */
623                         if ((bits & (1<<BDI_async_congested)) || 1)
624                                 ret |= bdi_congested(&q->backing_dev_info, bits);
625                         else
626                                 ret &= bdi_congested(&q->backing_dev_info, bits);
627                 }
628         }
629         rcu_read_unlock();
630         return ret;
631 }
632 EXPORT_SYMBOL_GPL(md_raid1_congested);
633
634 static int raid1_congested(void *data, int bits)
635 {
636         struct mddev *mddev = data;
637
638         return mddev_congested(mddev, bits) ||
639                 md_raid1_congested(mddev, bits);
640 }
641
642 static void flush_pending_writes(struct r1conf *conf)
643 {
644         /* Any writes that have been queued but are awaiting
645          * bitmap updates get flushed here.
646          */
647         spin_lock_irq(&conf->device_lock);
648
649         if (conf->pending_bio_list.head) {
650                 struct bio *bio;
651                 bio = bio_list_get(&conf->pending_bio_list);
652                 conf->pending_count = 0;
653                 spin_unlock_irq(&conf->device_lock);
654                 /* flush any pending bitmap writes to
655                  * disk before proceeding w/ I/O */
656                 bitmap_unplug(conf->mddev->bitmap);
657                 wake_up(&conf->wait_barrier);
658
659                 while (bio) { /* submit pending writes */
660                         struct bio *next = bio->bi_next;
661                         bio->bi_next = NULL;
662                         generic_make_request(bio);
663                         bio = next;
664                 }
665         } else
666                 spin_unlock_irq(&conf->device_lock);
667 }
668
669 /* Barriers....
670  * Sometimes we need to suspend IO while we do something else,
671  * either some resync/recovery, or reconfigure the array.
672  * To do this we raise a 'barrier'.
673  * The 'barrier' is a counter that can be raised multiple times
674  * to count how many activities are happening which preclude
675  * normal IO.
676  * We can only raise the barrier if there is no pending IO.
677  * i.e. if nr_pending == 0.
678  * We choose only to raise the barrier if no-one is waiting for the
679  * barrier to go down.  This means that as soon as an IO request
680  * is ready, no other operations which require a barrier will start
681  * until the IO request has had a chance.
682  *
683  * So: regular IO calls 'wait_barrier'.  When that returns there
684  *    is no background IO happening.  It must arrange to call
685  *    allow_barrier when it has finished its IO.
686  * background IO calls must call raise_barrier.  Once that returns
687  *    there is no normal IO happening.  It must arrange to call
688  *    lower_barrier when the particular background IO completes.
689  */
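/*
 * Typical usage (illustrative):
 *   normal IO:       wait_barrier(conf);  ... submit IO ...;        allow_barrier(conf);
 *   resync/recovery: raise_barrier(conf); ... do background IO ...; lower_barrier(conf);
 */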
690 #define RESYNC_DEPTH 32
691
692 static void raise_barrier(struct r1conf *conf)
693 {
694         spin_lock_irq(&conf->resync_lock);
695
696         /* Wait until no block IO is waiting */
697         wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
698                             conf->resync_lock, );
699
700         /* block any new IO from starting */
701         conf->barrier++;
702
703         /* Now wait for all pending IO to complete */
704         wait_event_lock_irq(conf->wait_barrier,
705                             !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
706                             conf->resync_lock, );
707
708         spin_unlock_irq(&conf->resync_lock);
709 }
710
711 static void lower_barrier(struct r1conf *conf)
712 {
713         unsigned long flags;
714         BUG_ON(conf->barrier <= 0);
715         spin_lock_irqsave(&conf->resync_lock, flags);
716         conf->barrier--;
717         spin_unlock_irqrestore(&conf->resync_lock, flags);
718         wake_up(&conf->wait_barrier);
719 }
720
721 static void wait_barrier(struct r1conf *conf)
722 {
723         spin_lock_irq(&conf->resync_lock);
724         if (conf->barrier) {
725                 conf->nr_waiting++;
726                 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
727                                     conf->resync_lock,
728                                     );
729                 conf->nr_waiting--;
730         }
731         conf->nr_pending++;
732         spin_unlock_irq(&conf->resync_lock);
733 }
734
735 static void allow_barrier(struct r1conf *conf)
736 {
737         unsigned long flags;
738         spin_lock_irqsave(&conf->resync_lock, flags);
739         conf->nr_pending--;
740         spin_unlock_irqrestore(&conf->resync_lock, flags);
741         wake_up(&conf->wait_barrier);
742 }
743
744 static void freeze_array(struct r1conf *conf)
745 {
746         /* stop syncio and normal IO and wait for everything to
747          * go quiet.
748          * We increment barrier and nr_waiting, and then
749          * wait until nr_pending matches nr_queued+1
750          * This is called in the context of one normal IO request
751          * that has failed. Thus any sync request that might be pending
752          * will be blocked by nr_pending, and we need to wait for
753          * pending IO requests to complete or be queued for re-try.
754          * Thus the number queued (nr_queued) plus this request (1)
755          * must match the number of pending IOs (nr_pending) before
756          * we continue.
757          */
758         spin_lock_irq(&conf->resync_lock);
759         conf->barrier++;
760         conf->nr_waiting++;
761         wait_event_lock_irq(conf->wait_barrier,
762                             conf->nr_pending == conf->nr_queued+1,
763                             conf->resync_lock,
764                             flush_pending_writes(conf));
765         spin_unlock_irq(&conf->resync_lock);
766 }
767 static void unfreeze_array(struct r1conf *conf)
768 {
769         /* reverse the effect of the freeze */
770         spin_lock_irq(&conf->resync_lock);
771         conf->barrier--;
772         conf->nr_waiting--;
773         wake_up(&conf->wait_barrier);
774         spin_unlock_irq(&conf->resync_lock);
775 }
776
777
778 /* duplicate the data pages for behind I/O 
779  */
780 static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
781 {
782         int i;
783         struct bio_vec *bvec;
784         struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
785                                         GFP_NOIO);
786         if (unlikely(!bvecs))
787                 return;
788
789         bio_for_each_segment(bvec, bio, i) {
790                 bvecs[i] = *bvec;
791                 bvecs[i].bv_page = alloc_page(GFP_NOIO);
792                 if (unlikely(!bvecs[i].bv_page))
793                         goto do_sync_io;
794                 memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
795                        kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
796                 kunmap(bvecs[i].bv_page);
797                 kunmap(bvec->bv_page);
798         }
799         r1_bio->behind_bvecs = bvecs;
800         r1_bio->behind_page_count = bio->bi_vcnt;
801         set_bit(R1BIO_BehindIO, &r1_bio->state);
802         return;
803
804 do_sync_io:
805         for (i = 0; i < bio->bi_vcnt; i++)
806                 if (bvecs[i].bv_page)
807                         put_page(bvecs[i].bv_page);
808         kfree(bvecs);
809         pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
810 }
811
812 static void make_request(struct mddev *mddev, struct bio * bio)
813 {
814         struct r1conf *conf = mddev->private;
815         struct mirror_info *mirror;
816         struct r1bio *r1_bio;
817         struct bio *read_bio;
818         int i, disks;
819         struct bitmap *bitmap;
820         unsigned long flags;
821         const int rw = bio_data_dir(bio);
822         const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
823         const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
824         struct md_rdev *blocked_rdev;
825         int plugged;
826         int first_clone;
827         int sectors_handled;
828         int max_sectors;
829
830         /*
831          * Register the new request and wait if the reconstruction
832          * thread has put up a bar for new requests.
833          * Continue immediately if no resync is active currently.
834          */
835
836         md_write_start(mddev, bio); /* wait on superblock update early */
837
838         if (bio_data_dir(bio) == WRITE &&
839             bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
840             bio->bi_sector < mddev->suspend_hi) {
841                 /* As the suspend_* range is controlled by
842                  * userspace, we want an interruptible
843                  * wait.
844                  */
845                 DEFINE_WAIT(w);
846                 for (;;) {
847                         flush_signals(current);
848                         prepare_to_wait(&conf->wait_barrier,
849                                         &w, TASK_INTERRUPTIBLE);
850                         if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
851                             bio->bi_sector >= mddev->suspend_hi)
852                                 break;
853                         schedule();
854                 }
855                 finish_wait(&conf->wait_barrier, &w);
856         }
857
858         wait_barrier(conf);
859
860         bitmap = mddev->bitmap;
861
862         /*
863          * make_request() can abort the operation when READA is being
864          * used and no empty request is available.
865          *
866          */
867         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
868
869         r1_bio->master_bio = bio;
870         r1_bio->sectors = bio->bi_size >> 9;
871         r1_bio->state = 0;
872         r1_bio->mddev = mddev;
873         r1_bio->sector = bio->bi_sector;
874
875         /* We might need to issue multiple reads to different
876          * devices if there are bad blocks around, so we keep
877          * track of the number of reads in bio->bi_phys_segments.
878          * If this is 0, there is only one r1_bio and no locking
879          * will be needed when requests complete.  If it is
880          * non-zero, then it is the number of not-completed requests.
881          */
882         bio->bi_phys_segments = 0;
883         clear_bit(BIO_SEG_VALID, &bio->bi_flags);
884
885         if (rw == READ) {
886                 /*
887                  * read balancing logic:
888                  */
889                 int rdisk;
890
891 read_again:
892                 rdisk = read_balance(conf, r1_bio, &max_sectors);
893
894                 if (rdisk < 0) {
895                         /* couldn't find anywhere to read from */
896                         raid_end_bio_io(r1_bio);
897                         return;
898                 }
899                 mirror = conf->mirrors + rdisk;
900
901                 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
902                     bitmap) {
903                         /* Reading from a write-mostly device must
904                          * take care not to over-take any writes
905                          * that are 'behind'
906                          */
907                         wait_event(bitmap->behind_wait,
908                                    atomic_read(&bitmap->behind_writes) == 0);
909                 }
910                 r1_bio->read_disk = rdisk;
911
912                 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
913                 md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
914                             max_sectors);
915
916                 r1_bio->bios[rdisk] = read_bio;
917
918                 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
919                 read_bio->bi_bdev = mirror->rdev->bdev;
920                 read_bio->bi_end_io = raid1_end_read_request;
921                 read_bio->bi_rw = READ | do_sync;
922                 read_bio->bi_private = r1_bio;
923
924                 if (max_sectors < r1_bio->sectors) {
925                         /* could not read all from this device, so we will
926                          * need another r1_bio.
927                          */
928
929                         sectors_handled = (r1_bio->sector + max_sectors
930                                            - bio->bi_sector);
931                         r1_bio->sectors = max_sectors;
932                         spin_lock_irq(&conf->device_lock);
933                         if (bio->bi_phys_segments == 0)
934                                 bio->bi_phys_segments = 2;
935                         else
936                                 bio->bi_phys_segments++;
937                         spin_unlock_irq(&conf->device_lock);
938                         /* Cannot call generic_make_request directly
939                          * as that will be queued in __make_request
940                          * and subsequent mempool_alloc might block waiting
941                          * for it.  So hand bio over to raid1d.
942                          */
943                         reschedule_retry(r1_bio);
944
945                         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
946
947                         r1_bio->master_bio = bio;
948                         r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
949                         r1_bio->state = 0;
950                         r1_bio->mddev = mddev;
951                         r1_bio->sector = bio->bi_sector + sectors_handled;
952                         goto read_again;
953                 } else
954                         generic_make_request(read_bio);
955                 return;
956         }
957
958         /*
959          * WRITE:
960          */
961         if (conf->pending_count >= max_queued_requests) {
962                 md_wakeup_thread(mddev->thread);
963                 wait_event(conf->wait_barrier,
964                            conf->pending_count < max_queued_requests);
965         }
966         /* first select target devices under rcu_lock and
967          * inc refcount on their rdev.  Record them by setting
968          * bios[x] to bio
969          * If there are known/acknowledged bad blocks on any device on
970          * which we have seen a write error, we want to avoid writing those
971          * blocks.
972          * This potentially requires several writes to write around
973          * the bad blocks.  Each set of writes gets its own r1bio
974          * with a set of bios attached.
975          */
976         plugged = mddev_check_plugged(mddev);
977
978         disks = conf->raid_disks * 2;
979  retry_write:
980         blocked_rdev = NULL;
981         rcu_read_lock();
982         max_sectors = r1_bio->sectors;
983         for (i = 0;  i < disks; i++) {
984                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
985                 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
986                         atomic_inc(&rdev->nr_pending);
987                         blocked_rdev = rdev;
988                         break;
989                 }
990                 r1_bio->bios[i] = NULL;
991                 if (!rdev || test_bit(Faulty, &rdev->flags)) {
992                         if (i < conf->raid_disks)
993                                 set_bit(R1BIO_Degraded, &r1_bio->state);
994                         continue;
995                 }
996
997                 atomic_inc(&rdev->nr_pending);
998                 if (test_bit(WriteErrorSeen, &rdev->flags)) {
999                         sector_t first_bad;
1000                         int bad_sectors;
1001                         int is_bad;
1002
1003                         is_bad = is_badblock(rdev, r1_bio->sector,
1004                                              max_sectors,
1005                                              &first_bad, &bad_sectors);
1006                         if (is_bad < 0) {
1007                                 /* mustn't write here until the bad block is
1008                                  * acknowledged */
1009                                 set_bit(BlockedBadBlocks, &rdev->flags);
1010                                 blocked_rdev = rdev;
1011                                 break;
1012                         }
1013                         if (is_bad && first_bad <= r1_bio->sector) {
1014                                 /* Cannot write here at all */
1015                                 bad_sectors -= (r1_bio->sector - first_bad);
1016                                 if (bad_sectors < max_sectors)
1017                                         /* mustn't write more than bad_sectors
1018                                          * to other devices yet
1019                                          */
1020                                         max_sectors = bad_sectors;
1021                                 rdev_dec_pending(rdev, mddev);
1022                                 /* We don't set R1BIO_Degraded as that
1023                                  * only applies if the disk is
1024                                  * missing, so it might be re-added,
1025                                  * and we want to know to recover this
1026                                  * chunk.
1027                                  * In this case the device is here,
1028                                  * and the fact that this chunk is not
1029                                  * in-sync is recorded in the bad
1030                                  * block log
1031                                  */
1032                                 continue;
1033                         }
1034                         if (is_bad) {
1035                                 int good_sectors = first_bad - r1_bio->sector;
1036                                 if (good_sectors < max_sectors)
1037                                         max_sectors = good_sectors;
1038                         }
1039                 }
1040                 r1_bio->bios[i] = bio;
1041         }
1042         rcu_read_unlock();
1043
1044         if (unlikely(blocked_rdev)) {
1045                 /* Wait for this device to become unblocked */
1046                 int j;
1047
1048                 for (j = 0; j < i; j++)
1049                         if (r1_bio->bios[j])
1050                                 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1051                 r1_bio->state = 0;
1052                 allow_barrier(conf);
1053                 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1054                 wait_barrier(conf);
1055                 goto retry_write;
1056         }
1057
1058         if (max_sectors < r1_bio->sectors) {
1059                 /* We are splitting this write into multiple parts, so
1060                  * we need to prepare for allocating another r1_bio.
1061                  */
1062                 r1_bio->sectors = max_sectors;
1063                 spin_lock_irq(&conf->device_lock);
1064                 if (bio->bi_phys_segments == 0)
1065                         bio->bi_phys_segments = 2;
1066                 else
1067                         bio->bi_phys_segments++;
1068                 spin_unlock_irq(&conf->device_lock);
1069         }
1070         sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
1071
1072         atomic_set(&r1_bio->remaining, 1);
1073         atomic_set(&r1_bio->behind_remaining, 0);
1074
1075         first_clone = 1;
1076         for (i = 0; i < disks; i++) {
1077                 struct bio *mbio;
1078                 if (!r1_bio->bios[i])
1079                         continue;
1080
1081                 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1082                 md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
1083
1084                 if (first_clone) {
1085                         /* do behind I/O ?
1086                          * Not if there are too many, or cannot
1087                          * allocate memory, or a reader on WriteMostly
1088                          * is waiting for behind writes to flush */
1089                         if (bitmap &&
1090                             (atomic_read(&bitmap->behind_writes)
1091                              < mddev->bitmap_info.max_write_behind) &&
1092                             !waitqueue_active(&bitmap->behind_wait))
1093                                 alloc_behind_pages(mbio, r1_bio);
1094
1095                         bitmap_startwrite(bitmap, r1_bio->sector,
1096                                           r1_bio->sectors,
1097                                           test_bit(R1BIO_BehindIO,
1098                                                    &r1_bio->state));
1099                         first_clone = 0;
1100                 }
1101                 if (r1_bio->behind_bvecs) {
1102                         struct bio_vec *bvec;
1103                         int j;
1104
1105                         /* Yes, I really want the '__' version so that
1106                          * we clear any unused pointers in the io_vec, rather
1107                          * than leave them unchanged.  This is important
1108                          * because when we come to free the pages, we won't
1109                          * know the original bi_idx, so we just free
1110                          * them all
1111                          */
1112                         __bio_for_each_segment(bvec, mbio, j, 0)
1113                                 bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
1114                         if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
1115                                 atomic_inc(&r1_bio->behind_remaining);
1116                 }
1117
1118                 r1_bio->bios[i] = mbio;
1119
1120                 mbio->bi_sector = (r1_bio->sector +
1121                                    conf->mirrors[i].rdev->data_offset);
1122                 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1123                 mbio->bi_end_io = raid1_end_write_request;
1124                 mbio->bi_rw = WRITE | do_flush_fua | do_sync;
1125                 mbio->bi_private = r1_bio;
1126
1127                 atomic_inc(&r1_bio->remaining);
1128                 spin_lock_irqsave(&conf->device_lock, flags);
1129                 bio_list_add(&conf->pending_bio_list, mbio);
1130                 conf->pending_count++;
1131                 spin_unlock_irqrestore(&conf->device_lock, flags);
1132         }
1133         /* Mustn't call r1_bio_write_done before this next test,
1134          * as it could result in the bio being freed.
1135          */
1136         if (sectors_handled < (bio->bi_size >> 9)) {
1137                 r1_bio_write_done(r1_bio);
1138                 /* We need another r1_bio.  It has already been counted
1139                  * in bio->bi_phys_segments
1140                  */
1141                 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1142                 r1_bio->master_bio = bio;
1143                 r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1144                 r1_bio->state = 0;
1145                 r1_bio->mddev = mddev;
1146                 r1_bio->sector = bio->bi_sector + sectors_handled;
1147                 goto retry_write;
1148         }
1149
1150         r1_bio_write_done(r1_bio);
1151
1152         /* In case raid1d snuck in to freeze_array */
1153         wake_up(&conf->wait_barrier);
1154
1155         if (do_sync || !bitmap || !plugged)
1156                 md_wakeup_thread(mddev->thread);
1157 }
1158
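/*
 * Report this array's status for /proc/mdstat: disk counts and a U/_ map
 * of in-sync devices.
 */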
1159 static void status(struct seq_file *seq, struct mddev *mddev)
1160 {
1161         struct r1conf *conf = mddev->private;
1162         int i;
1163
1164         seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1165                    conf->raid_disks - mddev->degraded);
1166         rcu_read_lock();
1167         for (i = 0; i < conf->raid_disks; i++) {
1168                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1169                 seq_printf(seq, "%s",
1170                            rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1171         }
1172         rcu_read_unlock();
1173         seq_printf(seq, "]");
1174 }
1175
1176
1177 static void error(struct mddev *mddev, struct md_rdev *rdev)
1178 {
1179         char b[BDEVNAME_SIZE];
1180         struct r1conf *conf = mddev->private;
1181
1182         /*
1183          * If it is not operational, then we have already marked it as dead
1184          * If it is not operational, then we have already marked it as dead;
1185          * else if it is the last working disk, ignore the error and let the
1186          * next level up know;
1187          * else mark the drive as failed.
1188         if (test_bit(In_sync, &rdev->flags)
1189             && (conf->raid_disks - mddev->degraded) == 1) {
1190                 /*
1191                  * Don't fail the drive, act as though we were just a
1192                  * normal single drive.
1193                  * However don't try a recovery from this drive as
1194                  * it is very likely to fail.
1195                  */
1196                 conf->recovery_disabled = mddev->recovery_disabled;
1197                 return;
1198         }
1199         set_bit(Blocked, &rdev->flags);
1200         if (test_and_clear_bit(In_sync, &rdev->flags)) {
1201                 unsigned long flags;
1202                 spin_lock_irqsave(&conf->device_lock, flags);
1203                 mddev->degraded++;
1204                 set_bit(Faulty, &rdev->flags);
1205                 spin_unlock_irqrestore(&conf->device_lock, flags);
1206                 /*
1207                  * if recovery is running, make sure it aborts.
1208                  */
1209                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1210         } else
1211                 set_bit(Faulty, &rdev->flags);
1212         set_bit(MD_CHANGE_DEVS, &mddev->flags);
1213         printk(KERN_ALERT
1214                "md/raid1:%s: Disk failure on %s, disabling device.\n"
1215                "md/raid1:%s: Operation continuing on %d devices.\n",
1216                mdname(mddev), bdevname(rdev->bdev, b),
1217                mdname(mddev), conf->raid_disks - mddev->degraded);
1218 }
1219
1220 static void print_conf(struct r1conf *conf)
1221 {
1222         int i;
1223
1224         printk(KERN_DEBUG "RAID1 conf printout:\n");
1225         if (!conf) {
1226                 printk(KERN_DEBUG "(!conf)\n");
1227                 return;
1228         }
1229         printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1230                 conf->raid_disks);
1231
1232         rcu_read_lock();
1233         for (i = 0; i < conf->raid_disks; i++) {
1234                 char b[BDEVNAME_SIZE];
1235                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1236                 if (rdev)
1237                         printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1238                                i, !test_bit(In_sync, &rdev->flags),
1239                                !test_bit(Faulty, &rdev->flags),
1240                                bdevname(rdev->bdev,b));
1241         }
1242         rcu_read_unlock();
1243 }
1244
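/*
 * Resync/recovery has finished: wait for the barrier to drop, then free
 * the resync buffer pool.
 */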
1245 static void close_sync(struct r1conf *conf)
1246 {
1247         wait_barrier(conf);
1248         allow_barrier(conf);
1249
1250         mempool_destroy(conf->r1buf_pool);
1251         conf->r1buf_pool = NULL;
1252 }
1253
1254 static int raid1_spare_active(struct mddev *mddev)
1255 {
1256         int i;
1257         struct r1conf *conf = mddev->private;
1258         int count = 0;
1259         unsigned long flags;
1260
1261         /*
1262          * Find all failed disks within the RAID1 configuration 
1263          * and mark them readable.
1264          * Called under mddev lock, so rcu protection not needed.
1265          */
1266         for (i = 0; i < conf->raid_disks; i++) {
1267                 struct md_rdev *rdev = conf->mirrors[i].rdev;
1268                 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1269                 if (repl
1270                     && repl->recovery_offset == MaxSector
1271                     && !test_bit(Faulty, &repl->flags)
1272                     && !test_and_set_bit(In_sync, &repl->flags)) {
1273                         /* replacement has just become active */
1274                         if (!rdev ||
1275                             !test_and_clear_bit(In_sync, &rdev->flags))
1276                                 count++;
1277                         if (rdev) {
1278                                 /* Replaced device not technically
1279                                  * faulty, but we need to be sure
1280                                  * it gets removed and never re-added
1281                                  */
1282                                 set_bit(Faulty, &rdev->flags);
1283                                 sysfs_notify_dirent_safe(
1284                                         rdev->sysfs_state);
1285                         }
1286                 }
1287                 if (rdev
1288                     && !test_bit(Faulty, &rdev->flags)
1289                     && !test_and_set_bit(In_sync, &rdev->flags)) {
1290                         count++;
1291                         sysfs_notify_dirent_safe(rdev->sysfs_state);
1292                 }
1293         }
1294         spin_lock_irqsave(&conf->device_lock, flags);
1295         mddev->degraded -= count;
1296         spin_unlock_irqrestore(&conf->device_lock, flags);
1297
1298         print_conf(conf);
1299         return count;
1300 }
1301
1302
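/*
 * Add a spare to the first free mirror slot (or the slot it previously
 * occupied) and publish it to readers under RCU.
 */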
1303 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1304 {
1305         struct r1conf *conf = mddev->private;
1306         int err = -EEXIST;
1307         int mirror = 0;
1308         struct mirror_info *p;
1309         int first = 0;
1310         int last = conf->raid_disks - 1;
1311
1312         if (mddev->recovery_disabled == conf->recovery_disabled)
1313                 return -EBUSY;
1314
1315         if (rdev->raid_disk >= 0)
1316                 first = last = rdev->raid_disk;
1317
1318         for (mirror = first; mirror <= last; mirror++)
1319                 if ( !(p=conf->mirrors+mirror)->rdev) {
1320
1321                         disk_stack_limits(mddev->gendisk, rdev->bdev,
1322                                           rdev->data_offset << 9);
1323                         /* as we don't honour merge_bvec_fn, we must
1324                          * never risk violating it, so limit
1325                          * ->max_segments to one lying within a single
1326                          * page, as a one page request is never in
1327                          * violation.
1328                          */
1329                         if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
1330                                 blk_queue_max_segments(mddev->queue, 1);
1331                                 blk_queue_segment_boundary(mddev->queue,
1332                                                            PAGE_CACHE_SIZE - 1);
1333                         }
1334
1335                         p->head_position = 0;
1336                         rdev->raid_disk = mirror;
1337                         err = 0;
1338                         /* As all devices are equivalent, we don't need a full recovery
1339                          * if this device was recently part of the array
1340                          */
1341                         if (rdev->saved_raid_disk < 0)
1342                                 conf->fullsync = 1;
1343                         rcu_assign_pointer(p->rdev, rdev);
1344                         break;
1345                 }
1346         md_integrity_add_rdev(rdev, mddev);
1347         print_conf(conf);
1348         return err;
1349 }
1350
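/*
 * Remove a device (or promote its replacement into the slot); returns
 * -EBUSY if the device still has pending IO or is still needed to keep
 * the array usable.
 */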
1351 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1352 {
1353         struct r1conf *conf = mddev->private;
1354         int err = 0;
1355         int number = rdev->raid_disk;
1356         struct mirror_info *p = conf->mirrors+ number;
1357
1358         if (rdev != p->rdev)
1359                 p = conf->mirrors + conf->raid_disks + number;
1360
1361         print_conf(conf);
1362         if (rdev == p->rdev) {
1363                 if (test_bit(In_sync, &rdev->flags) ||
1364                     atomic_read(&rdev->nr_pending)) {
1365                         err = -EBUSY;
1366                         goto abort;
1367                 }
1368                 /* Only remove non-faulty devices if recovery
1369                  * is not possible.
1370                  */
1371                 if (!test_bit(Faulty, &rdev->flags) &&
1372                     mddev->recovery_disabled != conf->recovery_disabled &&
1373                     mddev->degraded < conf->raid_disks) {
1374                         err = -EBUSY;
1375                         goto abort;
1376                 }
1377                 p->rdev = NULL;
1378                 synchronize_rcu();
1379                 if (atomic_read(&rdev->nr_pending)) {
1380                         /* lost the race, try later */
1381                         err = -EBUSY;
1382                         p->rdev = rdev;
1383                         goto abort;
1384                 } else if (conf->mirrors[conf->raid_disks + number].rdev) {
1385                         /* We just removed a device that is being replaced.
1386                          * Move down the replacement.  We drain all IO before
1387                          * doing this to avoid confusion.
1388                          */
1389                         struct md_rdev *repl =
1390                                 conf->mirrors[conf->raid_disks + number].rdev;
1391                         raise_barrier(conf);
1392                         clear_bit(Replacement, &repl->flags);
1393                         p->rdev = repl;
1394                         conf->mirrors[conf->raid_disks + number].rdev = NULL;
1395                         lower_barrier(conf);
1396                         clear_bit(WantReplacement, &rdev->flags);
1397                 } else
1398                         clear_bit(WantReplacement, &rdev->flags);
1399                 err = md_integrity_register(mddev);
1400         }
1401 abort:
1402
1403         print_conf(conf);
1404         return err;
1405 }
1406
1407
1408 static void end_sync_read(struct bio *bio, int error)
1409 {
1410         struct r1bio *r1_bio = bio->bi_private;
1411
1412         update_head_pos(r1_bio->read_disk, r1_bio);
1413
1414         /*
1415          * we have read a block, now it needs to be re-written,
1416          * or re-read if the read failed.
1417          * We don't do much here, just schedule handling by raid1d
1418          */
1419         if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1420                 set_bit(R1BIO_Uptodate, &r1_bio->state);
1421
1422         if (atomic_dec_and_test(&r1_bio->remaining))
1423                 reschedule_retry(r1_bio);
1424 }
1425
1426 static void end_sync_write(struct bio *bio, int error)
1427 {
1428         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1429         struct r1bio *r1_bio = bio->bi_private;
1430         struct mddev *mddev = r1_bio->mddev;
1431         struct r1conf *conf = mddev->private;
1432         int mirror = 0;
1433         sector_t first_bad;
1434         int bad_sectors;
1435
1436         mirror = find_bio_disk(r1_bio, bio);
1437
1438         if (!uptodate) {
1439                 sector_t sync_blocks = 0;
1440                 sector_t s = r1_bio->sector;
1441                 long sectors_to_go = r1_bio->sectors;
1442                 /* make sure these bits don't get cleared. */
1443                 do {
1444                         bitmap_end_sync(mddev->bitmap, s,
1445                                         &sync_blocks, 1);
1446                         s += sync_blocks;
1447                         sectors_to_go -= sync_blocks;
1448                 } while (sectors_to_go > 0);
1449                 set_bit(WriteErrorSeen,
1450                         &conf->mirrors[mirror].rdev->flags);
1451                 set_bit(R1BIO_WriteError, &r1_bio->state);
1452         } else if (is_badblock(conf->mirrors[mirror].rdev,
1453                                r1_bio->sector,
1454                                r1_bio->sectors,
1455                                &first_bad, &bad_sectors) &&
1456                    !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1457                                 r1_bio->sector,
1458                                 r1_bio->sectors,
1459                                 &first_bad, &bad_sectors)
1460                 )
1461                 set_bit(R1BIO_MadeGood, &r1_bio->state);
1462
1463         if (atomic_dec_and_test(&r1_bio->remaining)) {
1464                 int s = r1_bio->sectors;
1465                 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1466                     test_bit(R1BIO_WriteError, &r1_bio->state))
1467                         reschedule_retry(r1_bio);
1468                 else {
1469                         put_buf(r1_bio);
1470                         md_done_sync(mddev, s, uptodate);
1471                 }
1472         }
1473 }
1474
1475 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1476                             int sectors, struct page *page, int rw)
1477 {
1478         if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1479                 /* success */
1480                 return 1;
1481         if (rw == WRITE)
1482                 set_bit(WriteErrorSeen, &rdev->flags);
1483         /* need to record an error - either for the block or the device */
1484         if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1485                 md_error(rdev->mddev, rdev);
1486         return 0;
1487 }
1488
1489 static int fix_sync_read_error(struct r1bio *r1_bio)
1490 {
1491         /* Try some synchronous reads of other devices to get
1492          * good data, much like with normal read errors.  Only
1493          * read into the pages we already have so we don't
1494          * need to re-issue the read request.
1495          * We don't need to freeze the array, because being in an
1496          * active sync request, there is no normal IO, and
1497          * no overlapping syncs.
1498          * We don't need to check is_badblock() again as we
1499          * made sure that anything with a bad block in range
1500          * will have bi_end_io clear.
1501          */
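        /* Rough outline of the loop below (annotation, not original source):
         * for each chunk of at most PAGE_SIZE, (1) try a synchronous READ of
         * the chunk from each device whose bio uses end_sync_read until one
         * succeeds; (2) WRITE the good data back to every other such device,
         * with r1_sync_page_io() recording bad blocks on failure; (3) re-READ
         * from those devices to verify, crediting rdev->corrected_errors.
         * If no device can supply a chunk, a bad block is recorded on every
         * device, and recovery is aborted if even that fails.
         */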
1502         struct mddev *mddev = r1_bio->mddev;
1503         struct r1conf *conf = mddev->private;
1504         struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1505         sector_t sect = r1_bio->sector;
1506         int sectors = r1_bio->sectors;
1507         int idx = 0;
1508
1509         while(sectors) {
1510                 int s = sectors;
1511                 int d = r1_bio->read_disk;
1512                 int success = 0;
1513                 struct md_rdev *rdev;
1514                 int start;
1515
1516                 if (s > (PAGE_SIZE>>9))
1517                         s = PAGE_SIZE >> 9;
1518                 do {
1519                         if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1520                                 /* No rcu protection needed here; devices
1521                                  * can only be removed when no resync is
1522                                  * active, and resync is currently active
1523                                  */
1524                                 rdev = conf->mirrors[d].rdev;
1525                                 if (sync_page_io(rdev, sect, s<<9,
1526                                                  bio->bi_io_vec[idx].bv_page,
1527                                                  READ, false)) {
1528                                         success = 1;
1529                                         break;
1530                                 }
1531                         }
1532                         d++;
1533                         if (d == conf->raid_disks * 2)
1534                                 d = 0;
1535                 } while (!success && d != r1_bio->read_disk);
1536
1537                 if (!success) {
1538                         char b[BDEVNAME_SIZE];
1539                         int abort = 0;
1540                         /* Cannot read from anywhere, this block is lost.
1541                          * Record a bad block on each device.  If that doesn't
1542                          * work just disable and interrupt the recovery.
1543                          * Don't fail devices as that won't really help.
1544                          */
1545                         printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
1546                                " for block %llu\n",
1547                                mdname(mddev),
1548                                bdevname(bio->bi_bdev, b),
1549                                (unsigned long long)r1_bio->sector);
1550                         for (d = 0; d < conf->raid_disks * 2; d++) {
1551                                 rdev = conf->mirrors[d].rdev;
1552                                 if (!rdev || test_bit(Faulty, &rdev->flags))
1553                                         continue;
1554                                 if (!rdev_set_badblocks(rdev, sect, s, 0))
1555                                         abort = 1;
1556                         }
1557                         if (abort) {
1558                                 conf->recovery_disabled =
1559                                         mddev->recovery_disabled;
1560                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1561                                 md_done_sync(mddev, r1_bio->sectors, 0);
1562                                 put_buf(r1_bio);
1563                                 return 0;
1564                         }
1565                         /* Try next page */
1566                         sectors -= s;
1567                         sect += s;
1568                         idx++;
1569                         continue;
1570                 }
1571
1572                 start = d;
1573                 /* write it back and re-read */
1574                 while (d != r1_bio->read_disk) {
1575                         if (d == 0)
1576                                 d = conf->raid_disks * 2;
1577                         d--;
1578                         if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1579                                 continue;
1580                         rdev = conf->mirrors[d].rdev;
1581                         if (r1_sync_page_io(rdev, sect, s,
1582                                             bio->bi_io_vec[idx].bv_page,
1583                                             WRITE) == 0) {
1584                                 r1_bio->bios[d]->bi_end_io = NULL;
1585                                 rdev_dec_pending(rdev, mddev);
1586                         }
1587                 }
1588                 d = start;
1589                 while (d != r1_bio->read_disk) {
1590                         if (d == 0)
1591                                 d = conf->raid_disks * 2;
1592                         d--;
1593                         if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1594                                 continue;
1595                         rdev = conf->mirrors[d].rdev;
1596                         if (r1_sync_page_io(rdev, sect, s,
1597                                             bio->bi_io_vec[idx].bv_page,
1598                                             READ) != 0)
1599                                 atomic_add(s, &rdev->corrected_errors);
1600                 }
1601                 sectors -= s;
1602                 sect += s;
1603                 idx++;
1604         }
1605         set_bit(R1BIO_Uptodate, &r1_bio->state);
1606         set_bit(BIO_UPTODATE, &bio->bi_flags);
1607         return 1;
1608 }
1609
1610 static int process_checks(struct r1bio *r1_bio)
1611 {
1612         /* We have read all readable devices.  If we haven't
1613          * got the block, then there is no hope left.
1614          * If we have, then we want to do a comparison
1615          * and skip the write if everything is the same.
1616          * If any blocks failed to read, then we need to
1617          * attempt an over-write
1618          */
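        /* Sketch of the logic below (annotation, not original source): take
         * the first successfully-read bio as 'primary', then memcmp every
         * other successful read against it page by page.  Any mismatch (or
         * failed read) bumps mddev->resync_mismatches.  Devices whose data
         * matches - or, on a plain "check" run, merely read successfully -
         * have their write skipped; the rest get their bio fixed up for reuse
         * and the primary's pages copied in, to be written out later by
         * sync_request_write().
         */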
1619         struct mddev *mddev = r1_bio->mddev;
1620         struct r1conf *conf = mddev->private;
1621         int primary;
1622         int i;
1623
1624         for (primary = 0; primary < conf->raid_disks * 2; primary++)
1625                 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1626                     test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
1627                         r1_bio->bios[primary]->bi_end_io = NULL;
1628                         rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
1629                         break;
1630                 }
1631         r1_bio->read_disk = primary;
1632         for (i = 0; i < conf->raid_disks * 2; i++) {
1633                 int j;
1634                 int vcnt = r1_bio->sectors >> (PAGE_SHIFT - 9);
1635                 struct bio *pbio = r1_bio->bios[primary];
1636                 struct bio *sbio = r1_bio->bios[i];
1637                 int size;
1638
1639                 if (r1_bio->bios[i]->bi_end_io != end_sync_read)
1640                         continue;
1641
1642                 if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
1643                         for (j = vcnt; j-- ; ) {
1644                                 struct page *p, *s;
1645                                 p = pbio->bi_io_vec[j].bv_page;
1646                                 s = sbio->bi_io_vec[j].bv_page;
1647                                 if (memcmp(page_address(p),
1648                                            page_address(s),
1649                                            PAGE_SIZE))
1650                                         break;
1651                         }
1652                 } else
1653                         j = 0;
1654                 if (j >= 0)
1655                         mddev->resync_mismatches += r1_bio->sectors;
1656                 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
1657                               && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
1658                         /* No need to write to this device. */
1659                         sbio->bi_end_io = NULL;
1660                         rdev_dec_pending(conf->mirrors[i].rdev, mddev);
1661                         continue;
1662                 }
1663                 /* fixup the bio for reuse */
1664                 sbio->bi_vcnt = vcnt;
1665                 sbio->bi_size = r1_bio->sectors << 9;
1666                 sbio->bi_idx = 0;
1667                 sbio->bi_phys_segments = 0;
1668                 sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1669                 sbio->bi_flags |= 1 << BIO_UPTODATE;
1670                 sbio->bi_next = NULL;
1671                 sbio->bi_sector = r1_bio->sector +
1672                         conf->mirrors[i].rdev->data_offset;
1673                 sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1674                 size = sbio->bi_size;
1675                 for (j = 0; j < vcnt ; j++) {
1676                         struct bio_vec *bi;
1677                         bi = &sbio->bi_io_vec[j];
1678                         bi->bv_offset = 0;
1679                         if (size > PAGE_SIZE)
1680                                 bi->bv_len = PAGE_SIZE;
1681                         else
1682                                 bi->bv_len = size;
1683                         size -= PAGE_SIZE;
1684                         memcpy(page_address(bi->bv_page),
1685                                page_address(pbio->bi_io_vec[j].bv_page),
1686                                PAGE_SIZE);
1687                 }
1688         }
1689         return 0;
1690 }
1691
1692 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
1693 {
1694         struct r1conf *conf = mddev->private;
1695         int i;
1696         int disks = conf->raid_disks * 2;
1697         struct bio *bio, *wbio;
1698
1699         bio = r1_bio->bios[r1_bio->read_disk];
1700
1701         if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
1702                 /* ouch - failed to read all of that. */
1703                 if (!fix_sync_read_error(r1_bio))
1704                         return;
1705
1706         if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
1707                 if (process_checks(r1_bio) < 0)
1708                         return;
1709         /*
1710          * schedule writes
1711          */
1712         atomic_set(&r1_bio->remaining, 1);
1713         for (i = 0; i < disks ; i++) {
1714                 wbio = r1_bio->bios[i];
1715                 if (wbio->bi_end_io == NULL ||
1716                     (wbio->bi_end_io == end_sync_read &&
1717                      (i == r1_bio->read_disk ||
1718                       !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
1719                         continue;
1720
1721                 wbio->bi_rw = WRITE;
1722                 wbio->bi_end_io = end_sync_write;
1723                 atomic_inc(&r1_bio->remaining);
1724                 md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
1725
1726                 generic_make_request(wbio);
1727         }
1728
1729         if (atomic_dec_and_test(&r1_bio->remaining)) {
1730                 /* if we're here, all write(s) have completed, so clean up */
1731                 md_done_sync(mddev, r1_bio->sectors, 1);
1732                 put_buf(r1_bio);
1733         }
1734 }
1735
1736 /*
1737  * This is a kernel thread which:
1738  *
1739  *      1.      Retries failed read operations on working mirrors.
1740  *      2.      Updates the raid superblock when problems are encountered.
1741  *      3.      Performs writes following reads for array synchronising.
1742  */
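/* (Annotation, not original source: the thread body itself is raid1d()
 * further below; fix_read_error(), narrow_write_error() and the
 * handle_*() helpers in between implement the duties listed above.)
 */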
1743
1744 static void fix_read_error(struct r1conf *conf, int read_disk,
1745                            sector_t sect, int sectors)
1746 {
1747         struct mddev *mddev = conf->mddev;
1748         while(sectors) {
1749                 int s = sectors;
1750                 int d = read_disk;
1751                 int success = 0;
1752                 int start;
1753                 struct md_rdev *rdev;
1754
1755                 if (s > (PAGE_SIZE>>9))
1756                         s = PAGE_SIZE >> 9;
1757
1758                 do {
1759                         /* Note: no rcu protection needed here
1760                          * as this is synchronous in the raid1d thread
1761                          * which is the thread that might remove
1762                          * a device.  If raid1d ever becomes multi-threaded....
1763                          */
1764                         sector_t first_bad;
1765                         int bad_sectors;
1766
1767                         rdev = conf->mirrors[d].rdev;
1768                         if (rdev &&
1769                             test_bit(In_sync, &rdev->flags) &&
1770                             is_badblock(rdev, sect, s,
1771                                         &first_bad, &bad_sectors) == 0 &&
1772                             sync_page_io(rdev, sect, s<<9,
1773                                          conf->tmppage, READ, false))
1774                                 success = 1;
1775                         else {
1776                                 d++;
1777                                 if (d == conf->raid_disks * 2)
1778                                         d = 0;
1779                         }
1780                 } while (!success && d != read_disk);
1781
1782                 if (!success) {
1783                         /* Cannot read from anywhere - mark it bad */
1784                         struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
1785                         if (!rdev_set_badblocks(rdev, sect, s, 0))
1786                                 md_error(mddev, rdev);
1787                         break;
1788                 }
1789                 /* write it back and re-read */
1790                 start = d;
1791                 while (d != read_disk) {
1792                         if (d==0)
1793                                 d = conf->raid_disks * 2;
1794                         d--;
1795                         rdev = conf->mirrors[d].rdev;
1796                         if (rdev &&
1797                             test_bit(In_sync, &rdev->flags))
1798                                 r1_sync_page_io(rdev, sect, s,
1799                                                 conf->tmppage, WRITE);
1800                 }
1801                 d = start;
1802                 while (d != read_disk) {
1803                         char b[BDEVNAME_SIZE];
1804                         if (d==0)
1805                                 d = conf->raid_disks * 2;
1806                         d--;
1807                         rdev = conf->mirrors[d].rdev;
1808                         if (rdev &&
1809                             test_bit(In_sync, &rdev->flags)) {
1810                                 if (r1_sync_page_io(rdev, sect, s,
1811                                                     conf->tmppage, READ)) {
1812                                         atomic_add(s, &rdev->corrected_errors);
1813                                         printk(KERN_INFO
1814                                                "md/raid1:%s: read error corrected "
1815                                                "(%d sectors at %llu on %s)\n",
1816                                                mdname(mddev), s,
1817                                                (unsigned long long)(sect +
1818                                                    rdev->data_offset),
1819                                                bdevname(rdev->bdev, b));
1820                                 }
1821                         }
1822                 }
1823                 sectors -= s;
1824                 sect += s;
1825         }
1826 }
1827
1828 static void bi_complete(struct bio *bio, int error)
1829 {
1830         complete((struct completion *)bio->bi_private);
1831 }
1832
1833 static int submit_bio_wait(int rw, struct bio *bio)
1834 {
1835         struct completion event;
1836         rw |= REQ_SYNC;
1837
1838         init_completion(&event);
1839         bio->bi_private = &event;
1840         bio->bi_end_io = bi_complete;
1841         submit_bio(rw, bio);
1842         wait_for_completion(&event);
1843
1844         return test_bit(BIO_UPTODATE, &bio->bi_flags);
1845 }
1846
1847 static int narrow_write_error(struct r1bio *r1_bio, int i)
1848 {
1849         struct mddev *mddev = r1_bio->mddev;
1850         struct r1conf *conf = mddev->private;
1851         struct md_rdev *rdev = conf->mirrors[i].rdev;
1852         int vcnt, idx;
1853         struct bio_vec *vec;
1854
1855         /* bio has the data to be written to device 'i' where
1856          * we just recently had a write error.
1857          * We repeatedly clone the bio and trim down to one block,
1858          * then try the write.  Where the write fails we record
1859          * a bad block.
1860          * It is conceivable that the bio doesn't exactly align with
1861          * blocks.  We must handle this somehow.
1862          *
1863          * We currently own a reference on the rdev.
1864          */
1865
1866         int block_sectors;
1867         sector_t sector;
1868         int sectors;
1869         int sect_to_write = r1_bio->sectors;
1870         int ok = 1;
1871
1872         if (rdev->badblocks.shift < 0)
1873                 return 0;
1874
1875         block_sectors = 1 << rdev->badblocks.shift;
1876         sector = r1_bio->sector;
1877         sectors = ((sector + block_sectors)
1878                    & ~(sector_t)(block_sectors - 1))
1879                 - sector;
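        /* Worked example (annotation, assuming badblocks.shift == 3, i.e.
         * block_sectors == 8): with r1_bio->sector == 13 the first pass
         * writes sectors = ((13 + 8) & ~7) - 13 = 16 - 13 = 3 sectors,
         * reaching the 8-sector boundary at 16; later passes then proceed
         * in whole block_sectors-sized chunks.
         */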
1880
1881         if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
1882                 vcnt = r1_bio->behind_page_count;
1883                 vec = r1_bio->behind_bvecs;
1884                 idx = 0;
1885                 while (vec[idx].bv_page == NULL)
1886                         idx++;
1887         } else {
1888                 vcnt = r1_bio->master_bio->bi_vcnt;
1889                 vec = r1_bio->master_bio->bi_io_vec;
1890                 idx = r1_bio->master_bio->bi_idx;
1891         }
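        /* (Annotation, not original source.)  For write-behind requests the
         * data to retry lives in the privately copied behind_bvecs, since the
         * master bio may already have been completed back to the caller, so
         * the clones below must draw from those pages rather than from
         * master_bio's own io_vec.
         */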
1892         while (sect_to_write) {
1893                 struct bio *wbio;
1894                 if (sectors > sect_to_write)
1895                         sectors = sect_to_write;
1896                 /* Write at 'sector' for 'sectors'*/
1897
1898                 wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
1899                 memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
1900                 wbio->bi_sector = r1_bio->sector;
1901                 wbio->bi_rw = WRITE;
1902                 wbio->bi_vcnt = vcnt;
1903                 wbio->bi_size = r1_bio->sectors << 9;
1904                 wbio->bi_idx = idx;
1905
1906                 md_trim_bio(wbio, sector - r1_bio->sector, sectors);
1907                 wbio->bi_sector += rdev->data_offset;
1908                 wbio->bi_bdev = rdev->bdev;
1909                 if (submit_bio_wait(WRITE, wbio) == 0)
1910                         /* failure! */
1911                         ok = rdev_set_badblocks(rdev, sector,
1912                                                 sectors, 0)
1913                                 && ok;
1914
1915                 bio_put(wbio);
1916                 sect_to_write -= sectors;
1917                 sector += sectors;
1918                 sectors = block_sectors;
1919         }
1920         return ok;
1921 }
1922
1923 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
1924 {
1925         int m;
1926         int s = r1_bio->sectors;
1927         for (m = 0; m < conf->raid_disks * 2 ; m++) {
1928                 struct md_rdev *rdev = conf->mirrors[m].rdev;
1929                 struct bio *bio = r1_bio->bios[m];
1930                 if (bio->bi_end_io == NULL)
1931                         continue;
1932                 if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
1933                     test_bit(R1BIO_MadeGood, &r1_bio->state)) {
1934                         rdev_clear_badblocks(rdev, r1_bio->sector, s);
1935                 }
1936                 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
1937                     test_bit(R1BIO_WriteError, &r1_bio->state)) {
1938                         if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
1939                                 md_error(conf->mddev, rdev);
1940                 }
1941         }
1942         put_buf(r1_bio);
1943         md_done_sync(conf->mddev, s, 1);
1944 }
1945
1946 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
1947 {
1948         int m;
1949         for (m = 0; m < conf->raid_disks * 2 ; m++)
1950                 if (r1_bio->bios[m] == IO_MADE_GOOD) {
1951                         struct md_rdev *rdev = conf->mirrors[m].rdev;
1952                         rdev_clear_badblocks(rdev,
1953                                              r1_bio->sector,
1954                                              r1_bio->sectors);
1955                         rdev_dec_pending(rdev, conf->mddev);
1956                 } else if (r1_bio->bios[m] != NULL) {
1957                         /* This drive got a write error.  We need to
1958                          * narrow down and record precise write
1959                          * errors.
1960                          */
1961                         if (!narrow_write_error(r1_bio, m)) {
1962                                 md_error(conf->mddev,
1963                                          conf->mirrors[m].rdev);
1964                                 /* an I/O failed, we can't clear the bitmap */
1965                                 set_bit(R1BIO_Degraded, &r1_bio->state);
1966                         }
1967                         rdev_dec_pending(conf->mirrors[m].rdev,
1968                                          conf->mddev);
1969                 }
1970         if (test_bit(R1BIO_WriteError, &r1_bio->state))
1971                 close_write(r1_bio);
1972         raid_end_bio_io(r1_bio);
1973 }
1974
1975 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
1976 {
1977         int disk;
1978         int max_sectors;
1979         struct mddev *mddev = conf->mddev;
1980         struct bio *bio;
1981         char b[BDEVNAME_SIZE];
1982         struct md_rdev *rdev;
1983
1984         clear_bit(R1BIO_ReadError, &r1_bio->state);
1985         /* we got a read error. Maybe the drive is bad.  Maybe just
1986          * the block is bad and we can fix it.
1987          * We freeze all other IO, and try reading the block from
1988          * other devices.  When we find one, we re-write
1989          * the data and check that this fixes the read error.
1990          * This is all done synchronously while the array is
1991          * frozen
1992          */
1993         if (mddev->ro == 0) {
1994                 freeze_array(conf);
1995                 fix_read_error(conf, r1_bio->read_disk,
1996                                r1_bio->sector, r1_bio->sectors);
1997                 unfreeze_array(conf);
1998         } else
1999                 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
2000
2001         bio = r1_bio->bios[r1_bio->read_disk];
2002         bdevname(bio->bi_bdev, b);
2003 read_more:
2004         disk = read_balance(conf, r1_bio, &max_sectors);
2005         if (disk == -1) {
2006                 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
2007                        " read error for block %llu\n",
2008                        mdname(mddev), b, (unsigned long long)r1_bio->sector);
2009                 raid_end_bio_io(r1_bio);
2010         } else {
2011                 const unsigned long do_sync
2012                         = r1_bio->master_bio->bi_rw & REQ_SYNC;
2013                 if (bio) {
2014                         r1_bio->bios[r1_bio->read_disk] =
2015                                 mddev->ro ? IO_BLOCKED : NULL;
2016                         bio_put(bio);
2017                 }
2018                 r1_bio->read_disk = disk;
2019                 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2020                 md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
2021                 r1_bio->bios[r1_bio->read_disk] = bio;
2022                 rdev = conf->mirrors[disk].rdev;
2023                 printk_ratelimited(KERN_ERR
2024                                    "md/raid1:%s: redirecting sector %llu"
2025                                    " to other mirror: %s\n",
2026                                    mdname(mddev),
2027                                    (unsigned long long)r1_bio->sector,
2028                                    bdevname(rdev->bdev, b));
2029                 bio->bi_sector = r1_bio->sector + rdev->data_offset;
2030                 bio->bi_bdev = rdev->bdev;
2031                 bio->bi_end_io = raid1_end_read_request;
2032                 bio->bi_rw = READ | do_sync;
2033                 bio->bi_private = r1_bio;
2034                 if (max_sectors < r1_bio->sectors) {
2035                         /* Drat - have to split this up more */
2036                         struct bio *mbio = r1_bio->master_bio;
2037                         int sectors_handled = (r1_bio->sector + max_sectors
2038                                                - mbio->bi_sector);
2039                         r1_bio->sectors = max_sectors;
2040                         spin_lock_irq(&conf->device_lock);
2041                         if (mbio->bi_phys_segments == 0)
2042                                 mbio->bi_phys_segments = 2;
2043                         else
2044                                 mbio->bi_phys_segments++;
2045                         spin_unlock_irq(&conf->device_lock);
2046                         generic_make_request(bio);
2047                         bio = NULL;
2048
2049                         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
2050
2051                         r1_bio->master_bio = mbio;
2052                         r1_bio->sectors = (mbio->bi_size >> 9)
2053                                           - sectors_handled;
2054                         r1_bio->state = 0;
2055                         set_bit(R1BIO_ReadError, &r1_bio->state);
2056                         r1_bio->mddev = mddev;
2057                         r1_bio->sector = mbio->bi_sector + sectors_handled;
2058
2059                         goto read_more;
2060                 } else
2061                         generic_make_request(bio);
2062         }
2063 }
2064
2065 static void raid1d(struct mddev *mddev)
2066 {
2067         struct r1bio *r1_bio;
2068         unsigned long flags;
2069         struct r1conf *conf = mddev->private;
2070         struct list_head *head = &conf->retry_list;
2071         struct blk_plug plug;
2072
2073         md_check_recovery(mddev);
2074
2075         blk_start_plug(&plug);
2076         for (;;) {
2077
2078                 if (atomic_read(&mddev->plug_cnt) == 0)
2079                         flush_pending_writes(conf);
2080
2081                 spin_lock_irqsave(&conf->device_lock, flags);
2082                 if (list_empty(head)) {
2083                         spin_unlock_irqrestore(&conf->device_lock, flags);
2084                         break;
2085                 }
2086                 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2087                 list_del(head->prev);
2088                 conf->nr_queued--;
2089                 spin_unlock_irqrestore(&conf->device_lock, flags);
2090
2091                 mddev = r1_bio->mddev;
2092                 conf = mddev->private;
2093                 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2094                         if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2095                             test_bit(R1BIO_WriteError, &r1_bio->state))
2096                                 handle_sync_write_finished(conf, r1_bio);
2097                         else
2098                                 sync_request_write(mddev, r1_bio);
2099                 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2100                            test_bit(R1BIO_WriteError, &r1_bio->state))
2101                         handle_write_finished(conf, r1_bio);
2102                 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2103                         handle_read_error(conf, r1_bio);
2104                 else
2105                         /* just a partial read to be scheduled from separate
2106                          * context
2107                          */
2108                         generic_make_request(r1_bio->bios[r1_bio->read_disk]);
2109
2110                 cond_resched();
2111                 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2112                         md_check_recovery(mddev);
2113         }
2114         blk_finish_plug(&plug);
2115 }
2116
2117
2118 static int init_resync(struct r1conf *conf)
2119 {
2120         int buffs;
2121
2122         buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2123         BUG_ON(conf->r1buf_pool);
2124         conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
2125                                           conf->poolinfo);
2126         if (!conf->r1buf_pool)
2127                 return -ENOMEM;
2128         conf->next_resync = 0;
2129         return 0;
2130 }
2131
2132 /*
2133  * perform a "sync" on one "block"
2134  *
2135  * We need to make sure that no normal I/O request - particularly write
2136  * requests - conflict with active sync requests.
2137  *
2138  * This is achieved by tracking pending requests and a 'barrier' concept
2139  * that can be installed to exclude normal IO requests.
2140  */
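/* (Annotation, not original source.)  Roughly: sync_request() below calls
 * raise_barrier() before issuing resync I/O for a window, and the matching
 * lower_barrier() happens when the resync r1bio is released via put_buf();
 * the normal I/O path waits on the same barrier with wait_barrier() and
 * drops its reference with allow_barrier() as each request completes.
 */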
2141
2142 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
2143 {
2144         struct r1conf *conf = mddev->private;
2145         struct r1bio *r1_bio;
2146         struct bio *bio;
2147         sector_t max_sector, nr_sectors;
2148         int disk = -1;
2149         int i;
2150         int wonly = -1;
2151         int write_targets = 0, read_targets = 0;
2152         sector_t sync_blocks;
2153         int still_degraded = 0;
2154         int good_sectors = RESYNC_SECTORS;
2155         int min_bad = 0; /* number of sectors that are bad in all devices */
2156
2157         if (!conf->r1buf_pool)
2158                 if (init_resync(conf))
2159                         return 0;
2160
2161         max_sector = mddev->dev_sectors;
2162         if (sector_nr >= max_sector) {
2163                 /* If we aborted, we need to abort the
2164                  * sync on the 'current' bitmap chunk (there will
2165          * only be one in raid1 resync).
2166          * We can find the current address in mddev->curr_resync
2167                  */
2168                 if (mddev->curr_resync < max_sector) /* aborted */
2169                         bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2170                                                 &sync_blocks, 1);
2171                 else /* completed sync */
2172                         conf->fullsync = 0;
2173
2174                 bitmap_close_sync(mddev->bitmap);
2175                 close_sync(conf);
2176                 return 0;
2177         }
2178
2179         if (mddev->bitmap == NULL &&
2180             mddev->recovery_cp == MaxSector &&
2181             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2182             conf->fullsync == 0) {
2183                 *skipped = 1;
2184                 return max_sector - sector_nr;
2185         }
2186         /* before building a request, check if we can skip these blocks.
2187          * This call to bitmap_start_sync doesn't actually record anything
2188          */
2189         if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2190             !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2191                 /* We can skip this block, and probably several more */
2192                 *skipped = 1;
2193                 return sync_blocks;
2194         }
2195         /*
2196          * If there is non-resync activity waiting for a turn,
2197          * and resync is going fast enough,
2198          * then let it through before starting on this new sync request.
2199          */
2200         if (!go_faster && conf->nr_waiting)
2201                 msleep_interruptible(1000);
2202
2203         bitmap_cond_end_sync(mddev->bitmap, sector_nr);
2204         r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2205         raise_barrier(conf);
2206
2207         conf->next_resync = sector_nr;
2208
2209         rcu_read_lock();
2210         /*
2211          * If we get a correctable read error during resync or recovery,
2212          * we might want to read from a different device.  So we
2213          * flag all drives that could conceivably be read from for READ,
2214          * and any others (which will be non-In_sync devices) for WRITE.
2215          * If a read fails, we try reading from something else for which READ
2216          * is OK.
2217          */
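        /* (Annotation, not original source.)  Concretely, in the loop below:
         * missing or Faulty devices are skipped (marking still_degraded for
         * slots in the first half of the array); devices not yet In_sync get
         * a WRITE bio with end_sync_write; In_sync devices get a READ bio
         * with end_sync_read, after any known bad-block range has trimmed
         * good_sectors or contributed to min_bad.
         */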
2218
2219         r1_bio->mddev = mddev;
2220         r1_bio->sector = sector_nr;
2221         r1_bio->state = 0;
2222         set_bit(R1BIO_IsSync, &r1_bio->state);
2223
2224         for (i = 0; i < conf->raid_disks * 2; i++) {
2225                 struct md_rdev *rdev;
2226                 bio = r1_bio->bios[i];
2227
2228                 /* take from bio_init */
2229                 bio->bi_next = NULL;
2230                 bio->bi_flags &= ~(BIO_POOL_MASK-1);
2231                 bio->bi_flags |= 1 << BIO_UPTODATE;
2232                 bio->bi_rw = READ;
2233                 bio->bi_vcnt = 0;
2234                 bio->bi_idx = 0;
2235                 bio->bi_phys_segments = 0;
2236                 bio->bi_size = 0;
2237                 bio->bi_end_io = NULL;
2238                 bio->bi_private = NULL;
2239
2240                 rdev = rcu_dereference(conf->mirrors[i].rdev);
2241                 if (rdev == NULL ||
2242                     test_bit(Faulty, &rdev->flags)) {
2243                         if (i < conf->raid_disks)
2244                                 still_degraded = 1;
2245                 } else if (!test_bit(In_sync, &rdev->flags)) {
2246                         bio->bi_rw = WRITE;
2247                         bio->bi_end_io = end_sync_write;
2248                         write_targets++;
2249                 } else {
2250                         /* may need to read from here */
2251                         sector_t first_bad = MaxSector;
2252                         int bad_sectors;
2253
2254                         if (is_badblock(rdev, sector_nr, good_sectors,
2255                                         &first_bad, &bad_sectors)) {
2256                                 if (first_bad > sector_nr)
2257                                         good_sectors = first_bad - sector_nr;
2258                                 else {
2259                                         bad_sectors -= (sector_nr - first_bad);
2260                                         if (min_bad == 0 ||
2261                                             min_bad > bad_sectors)
2262                                                 min_bad = bad_sectors;
2263                                 }
2264                         }
2265                         if (sector_nr < first_bad) {
2266                                 if (test_bit(WriteMostly, &rdev->flags)) {
2267                                         if (wonly < 0)
2268                                                 wonly = i;
2269                                 } else {
2270                                         if (disk < 0)
2271                                                 disk = i;
2272                                 }
2273                                 bio->bi_rw = READ;
2274                                 bio->bi_end_io = end_sync_read;
2275                                 read_targets++;
2276                         }
2277                 }
2278                 if (bio->bi_end_io) {
2279                         atomic_inc(&rdev->nr_pending);
2280                         bio->bi_sector = sector_nr + rdev->data_offset;
2281                         bio->bi_bdev = rdev->bdev;
2282                         bio->bi_private = r1_bio;
2283                 }
2284         }
2285         rcu_read_unlock();
2286         if (disk < 0)
2287                 disk = wonly;
2288         r1_bio->read_disk = disk;
2289
2290         if (read_targets == 0 && min_bad > 0) {
2291                 /* These sectors are bad on all InSync devices, so we
2292                  * need to mark them bad on all write targets
2293                  */
2294                 int ok = 1;
2295                 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2296                         if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2297                                 struct md_rdev *rdev =
2298                                         rcu_dereference(conf->mirrors[i].rdev);
2299                                 ok = rdev_set_badblocks(rdev, sector_nr,
2300                                                         min_bad, 0
2301                                         ) && ok;
2302                         }
2303                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2304                 *skipped = 1;
2305                 put_buf(r1_bio);
2306
2307                 if (!ok) {
2308                         /* Cannot record the badblocks, so need to
2309                          * abort the resync.
2310                          * If there are multiple read targets, could just
2311                          * fail the really bad ones ???
2312                          */
2313                         conf->recovery_disabled = mddev->recovery_disabled;
2314                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2315                         return 0;
2316                 } else
2317                         return min_bad;
2318
2319         }
2320         if (min_bad > 0 && min_bad < good_sectors) {
2321                 /* only resync enough to reach the next bad->good
2322                  * transition */
2323                 good_sectors = min_bad;
2324         }
2325
2326         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2327                 /* extra read targets are also write targets */
2328                 write_targets += read_targets-1;
2329
2330         if (write_targets == 0 || read_targets == 0) {
2331                 /* There is nowhere to write, so all non-sync
2332                  * drives must have failed, so we are finished
2333                  */
2334                 sector_t rv = max_sector - sector_nr;
2335                 *skipped = 1;
2336                 put_buf(r1_bio);
2337                 return rv;
2338         }
2339
2340         if (max_sector > mddev->resync_max)
2341                 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2342         if (max_sector > sector_nr + good_sectors)
2343                 max_sector = sector_nr + good_sectors;
2344         nr_sectors = 0;
2345         sync_blocks = 0;
2346         do {
2347                 struct page *page;
2348                 int len = PAGE_SIZE;
2349                 if (sector_nr + (len>>9) > max_sector)
2350                         len = (max_sector - sector_nr) << 9;
2351                 if (len == 0)
2352                         break;
2353                 if (sync_blocks == 0) {
2354                         if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2355                                                &sync_blocks, still_degraded) &&
2356                             !conf->fullsync &&
2357                             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2358                                 break;
2359                         BUG_ON(sync_blocks < (PAGE_SIZE>>9));
2360                         if ((len >> 9) > sync_blocks)
2361                                 len = sync_blocks<<9;
2362                 }
2363
2364                 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2365                         bio = r1_bio->bios[i];
2366                         if (bio->bi_end_io) {
2367                                 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2368                                 if (bio_add_page(bio, page, len, 0) == 0) {
2369                                         /* stop here */
2370                                         bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2371                                         while (i > 0) {
2372                                                 i--;
2373                                                 bio = r1_bio->bios[i];
2374                                                 if (bio->bi_end_io==NULL)
2375                                                         continue;
2376                                                 /* remove last page from this bio */
2377                                                 bio->bi_vcnt--;
2378                                                 bio->bi_size -= len;
2379                                                 bio->bi_flags &= ~(1<< BIO_SEG_VALID);
2380                                         }
2381                                         goto bio_full;
2382                                 }
2383                         }
2384                 }
2385                 nr_sectors += len>>9;
2386                 sector_nr += len>>9;
2387                 sync_blocks -= (len>>9);
2388         } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
2389  bio_full:
2390         r1_bio->sectors = nr_sectors;
2391
2392         /* For a user-requested sync, we read all readable devices and do a
2393          * compare
2394          */
2395         if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2396                 atomic_set(&r1_bio->remaining, read_targets);
2397                 for (i = 0; i < conf->raid_disks * 2; i++) {
2398                         bio = r1_bio->bios[i];
2399                         if (bio->bi_end_io == end_sync_read) {
2400                                 md_sync_acct(bio->bi_bdev, nr_sectors);
2401                                 generic_make_request(bio);
2402                         }
2403                 }
2404         } else {
2405                 atomic_set(&r1_bio->remaining, 1);
2406                 bio = r1_bio->bios[r1_bio->read_disk];
2407                 md_sync_acct(bio->bi_bdev, nr_sectors);
2408                 generic_make_request(bio);
2409
2410         }
2411         return nr_sectors;
2412 }
2413
2414 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2415 {
2416         if (sectors)
2417                 return sectors;
2418
2419         return mddev->dev_sectors;
2420 }
2421
2422 static struct r1conf *setup_conf(struct mddev *mddev)
2423 {
2424         struct r1conf *conf;
2425         int i;
2426         struct mirror_info *disk;
2427         struct md_rdev *rdev;
2428         int err = -ENOMEM;
2429
2430         conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2431         if (!conf)
2432                 goto abort;
2433
2434         conf->mirrors = kzalloc(sizeof(struct mirror_info)
2435                                 * mddev->raid_disks * 2,
2436                                  GFP_KERNEL);
2437         if (!conf->mirrors)
2438                 goto abort;
2439
2440         conf->tmppage = alloc_page(GFP_KERNEL);
2441         if (!conf->tmppage)
2442                 goto abort;
2443
2444         conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2445         if (!conf->poolinfo)
2446                 goto abort;
2447         conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2448         conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2449                                           r1bio_pool_free,
2450                                           conf->poolinfo);
2451         if (!conf->r1bio_pool)
2452                 goto abort;
2453
2454         conf->poolinfo->mddev = mddev;
2455
2456         err = -EINVAL;
2457         spin_lock_init(&conf->device_lock);
2458         list_for_each_entry(rdev, &mddev->disks, same_set) {
2459                 int disk_idx = rdev->raid_disk;
2460                 if (disk_idx >= mddev->raid_disks
2461                     || disk_idx < 0)
2462                         continue;
2463                 if (test_bit(Replacement, &rdev->flags))
2464                         disk = conf->mirrors + conf->raid_disks + disk_idx;
2465                 else
2466                         disk = conf->mirrors + disk_idx;
2467
2468                 if (disk->rdev)
2469                         goto abort;
2470                 disk->rdev = rdev;
2471
2472                 disk->head_position = 0;
2473         }
2474         conf->raid_disks = mddev->raid_disks;
2475         conf->mddev = mddev;
2476         INIT_LIST_HEAD(&conf->retry_list);
2477
2478         spin_lock_init(&conf->resync_lock);
2479         init_waitqueue_head(&conf->wait_barrier);
2480
2481         bio_list_init(&conf->pending_bio_list);
2482         conf->pending_count = 0;
2483         conf->recovery_disabled = mddev->recovery_disabled - 1;
2484
2485         err = -EIO;
2486         conf->last_used = -1;
2487         for (i = 0; i < conf->raid_disks * 2; i++) {
2488
2489                 disk = conf->mirrors + i;
2490
2491                 if (i < conf->raid_disks &&
2492                     disk[conf->raid_disks].rdev) {
2493                         /* This slot has a replacement. */
2494                         if (!disk->rdev) {
2495                                 /* No original, just make the replacement
2496                                  * a recovering spare
2497                                  */
2498                                 disk->rdev =
2499                                         disk[conf->raid_disks].rdev;
2500                                 disk[conf->raid_disks].rdev = NULL;
2501                         } else if (!test_bit(In_sync, &disk->rdev->flags))
2502                                 /* Original is not in_sync - bad */
2503                                 goto abort;
2504                 }
2505
2506                 if (!disk->rdev ||
2507                     !test_bit(In_sync, &disk->rdev->flags)) {
2508                         disk->head_position = 0;
2509                         if (disk->rdev)
2510                                 conf->fullsync = 1;
2511                 } else if (conf->last_used < 0)
2512                         /*
2513                          * The first working device is used as a
2514                          * starting point for read balancing.
2515                          */
2516                         conf->last_used = i;
2517         }
2518
2519         if (conf->last_used < 0) {
2520                 printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
2521                        mdname(mddev));
2522                 goto abort;
2523         }
2524         err = -ENOMEM;
2525         conf->thread = md_register_thread(raid1d, mddev, NULL);
2526         if (!conf->thread) {
2527                 printk(KERN_ERR
2528                        "md/raid1:%s: couldn't allocate thread\n",
2529                        mdname(mddev));
2530                 goto abort;
2531         }
2532
2533         return conf;
2534
2535  abort:
2536         if (conf) {
2537                 if (conf->r1bio_pool)
2538                         mempool_destroy(conf->r1bio_pool);
2539                 kfree(conf->mirrors);
2540                 safe_put_page(conf->tmppage);
2541                 kfree(conf->poolinfo);
2542                 kfree(conf);
2543         }
2544         return ERR_PTR(err);
2545 }
2546
2547 static int run(struct mddev *mddev)
2548 {
2549         struct r1conf *conf;
2550         int i;
2551         struct md_rdev *rdev;
2552
2553         if (mddev->level != 1) {
2554                 printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
2555                        mdname(mddev), mddev->level);
2556                 return -EIO;
2557         }
2558         if (mddev->reshape_position != MaxSector) {
2559                 printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
2560                        mdname(mddev));
2561                 return -EIO;
2562         }
2563         /*
2564          * copy the already verified devices into our private RAID1
2565          * bookkeeping area. [whatever we allocate in run(),
2566          * should be freed in stop()]
2567          */
2568         if (mddev->private == NULL)
2569                 conf = setup_conf(mddev);
2570         else
2571                 conf = mddev->private;
2572
2573         if (IS_ERR(conf))
2574                 return PTR_ERR(conf);
2575
2576         list_for_each_entry(rdev, &mddev->disks, same_set) {
2577                 if (!mddev->gendisk)
2578                         continue;
2579                 disk_stack_limits(mddev->gendisk, rdev->bdev,
2580                                   rdev->data_offset << 9);
2581                 /* as we don't honour merge_bvec_fn, we must never risk
2582                  * violating it, so limit ->max_segments to 1 lying within
2583                  * a single page, as a one page request is never in violation.
2584                  */
2585                 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2586                         blk_queue_max_segments(mddev->queue, 1);
2587                         blk_queue_segment_boundary(mddev->queue,
2588                                                    PAGE_CACHE_SIZE - 1);
2589                 }
2590         }
2591
2592         mddev->degraded = 0;
2593         for (i=0; i < conf->raid_disks; i++)
2594                 if (conf->mirrors[i].rdev == NULL ||
2595                     !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
2596                     test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2597                         mddev->degraded++;
2598
2599         if (conf->raid_disks - mddev->degraded == 1)
2600                 mddev->recovery_cp = MaxSector;
2601
2602         if (mddev->recovery_cp != MaxSector)
2603                 printk(KERN_NOTICE "md/raid1:%s: not clean"
2604                        " -- starting background reconstruction\n",
2605                        mdname(mddev));
2606         printk(KERN_INFO
2607                 "md/raid1:%s: active with %d out of %d mirrors\n",
2608                 mdname(mddev), mddev->raid_disks - mddev->degraded,
2609                 mddev->raid_disks);
2610
2611         /*
2612          * Ok, everything is just fine now
2613          */
2614         mddev->thread = conf->thread;
2615         conf->thread = NULL;
2616         mddev->private = conf;
2617
2618         md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2619
2620         if (mddev->queue) {
2621                 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2622                 mddev->queue->backing_dev_info.congested_data = mddev;
2623         }
2624         return md_integrity_register(mddev);
2625 }
2626
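/*
 * stop() tears the array down: it waits for any outstanding write-behind
 * I/O tracked by the bitmap, drains in-flight requests with a barrier
 * raise/lower cycle, stops the raid1d thread and frees the private conf.
 */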
2627 static int stop(struct mddev *mddev)
2628 {
2629         struct r1conf *conf = mddev->private;
2630         struct bitmap *bitmap = mddev->bitmap;
2631
2632         /* wait for behind writes to complete */
2633         if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
2634                 printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
2635                        mdname(mddev));
2636                 /* need to kick something here to make sure I/O goes? */
2637                 wait_event(bitmap->behind_wait,
2638                            atomic_read(&bitmap->behind_writes) == 0);
2639         }
2640
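        /*
         * raise_barrier() does not return until every pending request has
         * completed; pairing it with lower_barrier() leaves the array idle
         * so the teardown below is safe.
         */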
2641         raise_barrier(conf);
2642         lower_barrier(conf);
2643
2644         md_unregister_thread(&mddev->thread);
2645         if (conf->r1bio_pool)
2646                 mempool_destroy(conf->r1bio_pool);
2647         kfree(conf->mirrors);
2648         kfree(conf->poolinfo);
2649         kfree(conf);
2650         mddev->private = NULL;
2651         return 0;
2652 }
2653
2654 static int raid1_resize(struct mddev *mddev, sector_t sectors)
2655 {
2656         /* no resync is happening, and there is enough space
2657          * on all devices, so we can resize.
2658          * We need to make sure resync covers any new space.
2659          * If the array is shrinking we should possibly wait until
2660          * any io in the removed space completes, but it hardly seems
2661          * worth it.
2662          */
2663         md_set_array_sectors(mddev, raid1_size(mddev, sectors, 0));
2664         if (mddev->array_sectors > raid1_size(mddev, sectors, 0))
2665                 return -EINVAL;
2666         set_capacity(mddev->gendisk, mddev->array_sectors);
2667         revalidate_disk(mddev->gendisk);
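        /*
         * If the array grew, the space beyond the old end has never been
         * synchronised, so pull the resync checkpoint back to the old size
         * and request a resync to cover the new region.
         */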
2668         if (sectors > mddev->dev_sectors &&
2669             mddev->recovery_cp > mddev->dev_sectors) {
2670                 mddev->recovery_cp = mddev->dev_sectors;
2671                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2672         }
2673         mddev->dev_sectors = sectors;
2674         mddev->resync_max_sectors = sectors;
2675         return 0;
2676 }
2677
2678 static int raid1_reshape(struct mddev *mddev)
2679 {
2680         /* We need to:
2681          * 1/ resize the r1bio_pool
2682          * 2/ resize conf->mirrors
2683          *
2684          * We allocate a new r1bio_pool if we can.
2685          * Then raise a device barrier and wait until all IO stops.
2686          * Then resize conf->mirrors and swap in the new r1bio pool.
2687          *
2688          * At the same time, we "pack" the devices so that all the missing
2689          * devices have the higher raid_disk numbers.
2690          */
2691         mempool_t *newpool, *oldpool;
2692         struct pool_info *newpoolinfo;
2693         struct mirror_info *newmirrors;
2694         struct r1conf *conf = mddev->private;
2695         int cnt, raid_disks;
2696         unsigned long flags;
2697         int d, d2, err;
2698
2699         /* Cannot change chunk_size, layout, or level */
2700         if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
2701             mddev->layout != mddev->new_layout ||
2702             mddev->level != mddev->new_level) {
2703                 mddev->new_chunk_sectors = mddev->chunk_sectors;
2704                 mddev->new_layout = mddev->layout;
2705                 mddev->new_level = mddev->level;
2706                 return -EINVAL;
2707         }
2708
2709         err = md_allow_write(mddev);
2710         if (err)
2711                 return err;
2712
2713         raid_disks = mddev->raid_disks + mddev->delta_disks;
2714
2715         if (raid_disks < conf->raid_disks) {
2716                 cnt = 0;
2717                 for (d = 0; d < conf->raid_disks; d++)
2718                         if (conf->mirrors[d].rdev)
2719                                 cnt++;
2720                 if (cnt > raid_disks)
2721                         return -EBUSY;
2722         }
2723
2724         newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
2725         if (!newpoolinfo)
2726                 return -ENOMEM;
2727         newpoolinfo->mddev = mddev;
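        /*
         * The slot count is doubled: the second half of the bios/mirrors
         * arrays leaves room for a replacement device alongside every
         * active disk.
         */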
2728         newpoolinfo->raid_disks = raid_disks * 2;
2729
2730         newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2731                                  r1bio_pool_free, newpoolinfo);
2732         if (!newpool) {
2733                 kfree(newpoolinfo);
2734                 return -ENOMEM;
2735         }
2736         newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2,
2737                              GFP_KERNEL);
2738         if (!newmirrors) {
2739                 kfree(newpoolinfo);
2740                 mempool_destroy(newpool);
2741                 return -ENOMEM;
2742         }
2743
2744         raise_barrier(conf);
2745
2746         /* ok, everything is stopped */
2747         oldpool = conf->r1bio_pool;
2748         conf->r1bio_pool = newpool;
2749
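        /*
         * Pack the surviving devices towards the low slot numbers.  When a
         * device moves, drop its old rd%d sysfs link, clear any stale link
         * that may exist at the new slot, then create the fresh one.
         */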
2750         for (d = d2 = 0; d < conf->raid_disks; d++) {
2751                 struct md_rdev *rdev = conf->mirrors[d].rdev;
2752                 if (rdev && rdev->raid_disk != d2) {
2753                         sysfs_unlink_rdev(mddev, rdev);
2754                         rdev->raid_disk = d2;
2755                         sysfs_unlink_rdev(mddev, rdev);
2756                         if (sysfs_link_rdev(mddev, rdev))
2757                                 printk(KERN_WARNING
2758                                        "md/raid1:%s: cannot register rd%d\n",
2759                                        mdname(mddev), rdev->raid_disk);
2760                 }
2761                 if (rdev)
2762                         newmirrors[d2++].rdev = rdev;
2763         }
2764         kfree(conf->mirrors);
2765         conf->mirrors = newmirrors;
2766         kfree(conf->poolinfo);
2767         conf->poolinfo = newpoolinfo;
2768
2769         spin_lock_irqsave(&conf->device_lock, flags);
2770         mddev->degraded += (raid_disks - conf->raid_disks);
2771         spin_unlock_irqrestore(&conf->device_lock, flags);
2772         conf->raid_disks = mddev->raid_disks = raid_disks;
2773         mddev->delta_disks = 0;
2774
2775         conf->last_used = 0; /* just make sure it is in-range */
2776         lower_barrier(conf);
2777
2778         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2779         md_wakeup_thread(mddev->thread);
2780
2781         mempool_destroy(oldpool);
2782         return 0;
2783 }
2784
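/*
 * raid1_quiesce() implements the md ->quiesce states: 1 freezes the array
 * by raising the resync barrier, 0 thaws it again, and 2 merely wakes
 * anything sleeping on wait_barrier so that a suspend-range update can be
 * noticed without a full freeze.
 */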
2785 static void raid1_quiesce(struct mddev *mddev, int state)
2786 {
2787         struct r1conf *conf = mddev->private;
2788
2789         switch(state) {
2790         case 2: /* wake for suspend */
2791                 wake_up(&conf->wait_barrier);
2792                 break;
2793         case 1:
2794                 raise_barrier(conf);
2795                 break;
2796         case 0:
2797                 lower_barrier(conf);
2798                 break;
2799         }
2800 }
2801
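/*
 * raid1_takeover() is invoked when the array's level is changed, typically
 * by writing "raid1" to the md/level sysfs attribute; for example (usage
 * sketch, device name assumed) "mdadm --grow /dev/md0 --level=1" on a
 * two-drive RAID5 ends up here.
 */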
2802 static void *raid1_takeover(struct mddev *mddev)
2803 {
2804         /* raid1 can take over:
2805          *  raid5 with 2 devices, any layout or chunk size
2806          */
2807         if (mddev->level == 5 && mddev->raid_disks == 2) {
2808                 struct r1conf *conf;
2809                 mddev->new_level = 1;
2810                 mddev->new_layout = 0;
2811                 mddev->new_chunk_sectors = 0;
2812                 conf = setup_conf(mddev);
2813                 if (!IS_ERR(conf))
2814                         conf->barrier = 1;
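                        /* Start frozen: the md core drops this barrier via
                         * ->quiesce(mddev, 0) when the array is resumed
                         * after the level change completes.
                         */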
2815                 return conf;
2816         }
2817         return ERR_PTR(-EINVAL);
2818 }
2819
2820 static struct md_personality raid1_personality =
2821 {
2822         .name           = "raid1",
2823         .level          = 1,
2824         .owner          = THIS_MODULE,
2825         .make_request   = make_request,
2826         .run            = run,
2827         .stop           = stop,
2828         .status         = status,
2829         .error_handler  = error,
2830         .hot_add_disk   = raid1_add_disk,
2831         .hot_remove_disk= raid1_remove_disk,
2832         .spare_active   = raid1_spare_active,
2833         .sync_request   = sync_request,
2834         .resize         = raid1_resize,
2835         .size           = raid1_size,
2836         .check_reshape  = raid1_reshape,
2837         .quiesce        = raid1_quiesce,
2838         .takeover       = raid1_takeover,
2839 };
2840
2841 static int __init raid_init(void)
2842 {
2843         return register_md_personality(&raid1_personality);
2844 }
2845
2846 static void raid_exit(void)
2847 {
2848         unregister_md_personality(&raid1_personality);
2849 }
2850
2851 module_init(raid_init);
2852 module_exit(raid_exit);
2853 MODULE_LICENSE("GPL");
2854 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
2855 MODULE_ALIAS("md-personality-3"); /* RAID1 */
2856 MODULE_ALIAS("md-raid1");
2857 MODULE_ALIAS("md-level-1");
2858
2859 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
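
/*
 * max_queued_requests is tunable at runtime; with these permissions it is
 * normally exposed as /sys/module/raid1/parameters/max_queued_requests, so
 * e.g. "echo 2048 > /sys/module/raid1/parameters/max_queued_requests"
 * raises the write-congestion threshold.
 */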