dm thin: correct comments
drivers/md/dm-thin.c
1 /*
2  * Copyright (C) 2011 Red Hat UK.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm-thin-metadata.h"
8
9 #include <linux/device-mapper.h>
10 #include <linux/dm-io.h>
11 #include <linux/dm-kcopyd.h>
12 #include <linux/list.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/slab.h>
16
17 #define DM_MSG_PREFIX   "thin"
18
19 /*
20  * Tunable constants
21  */
22 #define ENDIO_HOOK_POOL_SIZE 10240
23 #define DEFERRED_SET_SIZE 64
24 #define MAPPING_POOL_SIZE 1024
25 #define PRISON_CELLS 1024
26
27 /*
28  * The block size of the device holding pool data must be
29  * between 64KB and 1GB.
30  */
31 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
32 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
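/* i.e. 128 sectors (64KiB) up to 2097152 sectors (1GiB), given 512-byte sectors. */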
33
34 /*
35  * The metadata device is currently limited in size.  The limitation is
36  * checked lower down in dm-space-map-metadata, but we also check it here
37  * so we can fail early.
38  *
39  * We have one block of index, which can hold 255 index entries.  Each
40  * index entry contains allocation info about 16k metadata blocks.
41  */
42 #define METADATA_DEV_MAX_SECTORS (255 * (1 << 14) * (THIN_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
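/*
 * Illustrative arithmetic, assuming the 4096-byte THIN_METADATA_BLOCK_SIZE
 * defined in dm-thin-metadata.h: 255 index entries * 16k blocks per entry
 * = 4177920 metadata blocks; at 8 sectors per 4KiB block that is 33423360
 * sectors, i.e. a metadata device of at most roughly 16GiB.
 */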
43
44 /*
45  * Device id is restricted to 24 bits.
46  */
47 #define MAX_DEV_ID ((1 << 24) - 1)
48
49 /*
50  * How do we handle breaking sharing of data blocks?
51  * =================================================
52  *
53  * We use a standard copy-on-write btree to store the mappings for the
54  * devices (note I'm talking about copy-on-write of the metadata here, not
55  * the data).  When you take an internal snapshot you clone the root node
56  * of the origin btree.  After this there is no concept of an origin or a
57  * snapshot.  They are just two device trees that happen to point to the
58  * same data blocks.
59  *
60  * When we get a write in we decide if it's to a shared data block using
61  * some timestamp magic.  If it is, we have to break sharing.
62  *
63  * Let's say we write to a shared block in what was the origin.  The
64  * steps are:
65  *
66  * i) plug further io to this physical block. (see bio_prison code).
67  *
68  * ii) quiesce any read io to that shared data block.  This obviously
69  * includes all devices that share the block.  (see deferred_set code)
70  *
71  * iii) copy the data block to a newly allocated block.  This step can be
72  * skipped if the io covers the whole block. (schedule_copy).
73  *
74  * iv) insert the new mapping into the origin's btree
75  * (process_prepared_mapping).  This act of inserting breaks some
76  * sharing of btree nodes between the two devices.  Breaking sharing only
77  * affects the btree of that specific device.  Btrees for the other
78  * devices that share the block never change.  The btree for the origin
79  * device as it was after the last commit is untouched, ie. we're using
80  * persistent data structures in the functional programming sense.
81  *
82  * v) unplug io to this physical block, including the io that triggered
83  * the breaking of sharing.
84  *
85  * Steps (ii) and (iii) occur in parallel.
86  *
87  * The metadata _doesn't_ need to be committed before the io continues.  We
88  * get away with this because the io is always written to a _new_ block.
89  * If there's a crash, then:
90  *
91  * - The origin mapping will point to the old origin block (the shared
92  * one).  This will contain the data as it was before the io that triggered
93  * the breaking of sharing came in.
94  *
95  * - The snap mapping still points to the old block, just as it would
96  * after a commit.
97  *
98  * The downside of this scheme is that the timestamp magic isn't perfect,
99  * so it will continue to think the data block in the snapshot device is
100  * shared even after the write to the origin has broken sharing.  I suspect data
101  * blocks will typically be shared by many different devices, so we're
102  * breaking sharing n + 1 times, rather than n, where n is the number of
103  * devices that reference this data block.  At the moment I think the
104  * benefits far, far outweigh the disadvantages.
105  */
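/*
 * Roughly, the steps above map onto the functions defined later in this
 * file (illustrative summary only; see process_bio() for the real entry
 * point):
 *
 *   i)   bio_detain()               - plug io to the physical block
 *   ii)  ds_add_work()              - wait out in-flight reads
 *   iii) schedule_copy()            - kcopyd the old block to the new one
 *   iv)  process_prepared_mapping() - insert the new mapping into the btree
 *   v)   cell_defer()               - release the held bios
 */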
106
107 /*----------------------------------------------------------------*/
108
109 /*
110  * Sometimes we can't deal with a bio straight away.  We put it in prison
111  * where it can't cause any mischief.  Bios are put in a cell identified
112  * by a key; multiple bios can be in the same cell.  When the cell is
113  * subsequently unlocked the bios become available.
114  */
115 struct bio_prison;
116
117 struct cell_key {
118         int virtual;
119         dm_thin_id dev;
120         dm_block_t block;
121 };
122
123 struct cell {
124         struct hlist_node list;
125         struct bio_prison *prison;
126         struct cell_key key;
127         struct bio *holder;
128         struct bio_list bios;
129 };
130
131 struct bio_prison {
132         spinlock_t lock;
133         mempool_t *cell_pool;
134
135         unsigned nr_buckets;
136         unsigned hash_mask;
137         struct hlist_head *cells;
138 };
139
140 static uint32_t calc_nr_buckets(unsigned nr_cells)
141 {
142         uint32_t n = 128;
143
144         nr_cells /= 4;
145         nr_cells = min(nr_cells, 8192u);
146
147         while (n < nr_cells)
148                 n <<= 1;
149
150         return n;
151 }
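/*
 * For example, calc_nr_buckets(1024) gives nr_cells / 4 = 256, so n grows
 * from 128 to 256 buckets; the 8192 cap bounds the table for huge prisons.
 */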
152
153 /*
154  * @nr_cells should be the number of cells you want in use _concurrently_.
155  * Don't confuse it with the number of distinct keys.
156  */
157 static struct bio_prison *prison_create(unsigned nr_cells)
158 {
159         unsigned i;
160         uint32_t nr_buckets = calc_nr_buckets(nr_cells);
161         size_t len = sizeof(struct bio_prison) +
162                 (sizeof(struct hlist_head) * nr_buckets);
163         struct bio_prison *prison = kmalloc(len, GFP_KERNEL);
164
165         if (!prison)
166                 return NULL;
167
168         spin_lock_init(&prison->lock);
169         prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
170                                                         sizeof(struct cell));
171         if (!prison->cell_pool) {
172                 kfree(prison);
173                 return NULL;
174         }
175
176         prison->nr_buckets = nr_buckets;
177         prison->hash_mask = nr_buckets - 1;
178         prison->cells = (struct hlist_head *) (prison + 1);
179         for (i = 0; i < nr_buckets; i++)
180                 INIT_HLIST_HEAD(prison->cells + i);
181
182         return prison;
183 }
184
185 static void prison_destroy(struct bio_prison *prison)
186 {
187         mempool_destroy(prison->cell_pool);
188         kfree(prison);
189 }
190
191 static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
192 {
193         const unsigned long BIG_PRIME = 4294967291UL;
194         uint64_t hash = key->block * BIG_PRIME;
195
196         return (uint32_t) (hash & prison->hash_mask);
197 }
198
199 static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
200 {
201         return (lhs->virtual == rhs->virtual) &&
202                 (lhs->dev == rhs->dev) &&
203                 (lhs->block == rhs->block);
204 }
205
206 static struct cell *__search_bucket(struct hlist_head *bucket,
207                                     struct cell_key *key)
208 {
209         struct cell *cell;
210         struct hlist_node *tmp;
211
212         hlist_for_each_entry(cell, tmp, bucket, list)
213                 if (keys_equal(&cell->key, key))
214                         return cell;
215
216         return NULL;
217 }
218
219 /*
220  * This may block if a new cell needs allocating.  You must ensure that
221  * cells will be unlocked even if the calling thread is blocked.
222  *
223  * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
224  */
225 static int bio_detain(struct bio_prison *prison, struct cell_key *key,
226                       struct bio *inmate, struct cell **ref)
227 {
228         int r = 1;
229         unsigned long flags;
230         uint32_t hash = hash_key(prison, key);
231         struct cell *cell, *cell2;
232
233         BUG_ON(hash >= prison->nr_buckets);
234
235         spin_lock_irqsave(&prison->lock, flags);
236
237         cell = __search_bucket(prison->cells + hash, key);
238         if (cell) {
239                 bio_list_add(&cell->bios, inmate);
240                 goto out;
241         }
242
243         /*
244          * Allocate a new cell
245          */
246         spin_unlock_irqrestore(&prison->lock, flags);
247         cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
248         spin_lock_irqsave(&prison->lock, flags);
249
250         /*
251          * We've been unlocked, so we have to double check that
252          * nobody else has inserted this cell in the meantime.
253          */
254         cell = __search_bucket(prison->cells + hash, key);
255         if (cell) {
256                 mempool_free(cell2, prison->cell_pool);
257                 bio_list_add(&cell->bios, inmate);
258                 goto out;
259         }
260
261         /*
262          * Use new cell.
263          */
264         cell = cell2;
265
266         cell->prison = prison;
267         memcpy(&cell->key, key, sizeof(cell->key));
268         cell->holder = inmate;
269         bio_list_init(&cell->bios);
270         hlist_add_head(&cell->list, prison->cells + hash);
271
272         r = 0;
273
274 out:
275         spin_unlock_irqrestore(&prison->lock, flags);
276
277         *ref = cell;
278
279         return r;
280 }
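/*
 * A typical caller pattern (condensed from process_bio() below,
 * illustrative only):
 *
 *      build_virtual_key(tc->td, block, &key);
 *      if (bio_detain(tc->pool->prison, &key, bio, &cell))
 *              return;  // someone else holds the block; bio is now queued
 *      // we are the holder: provision/copy, then release the cell
 */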
281
282 /*
283  * @inmates must either be NULL or have been initialised prior to this call.
284  */
285 static void __cell_release(struct cell *cell, struct bio_list *inmates)
286 {
287         struct bio_prison *prison = cell->prison;
288
289         hlist_del(&cell->list);
290         if (inmates) {
291                 bio_list_add(inmates, cell->holder);
292                 bio_list_merge(inmates, &cell->bios);
293         }
294         mempool_free(cell, prison->cell_pool);
295 }
296
297 static void cell_release(struct cell *cell, struct bio_list *bios)
298 {
299         unsigned long flags;
300         struct bio_prison *prison = cell->prison;
301
302         spin_lock_irqsave(&prison->lock, flags);
303         __cell_release(cell, bios);
304         spin_unlock_irqrestore(&prison->lock, flags);
305 }
306
307 /*
308  * There are a couple of places where we put a bio into a cell briefly
309  * before taking it out again.  In these situations we know that no other
310  * bio may be in the cell.  This function releases the cell, and also does
311  * a sanity check.
312  */
313 static void __cell_release_singleton(struct cell *cell, struct bio *bio)
314 {
315         BUG_ON(cell->holder != bio);
316         BUG_ON(!bio_list_empty(&cell->bios));
317         __cell_release(cell, NULL);
318 }
319
320 static void cell_release_singleton(struct cell *cell, struct bio *bio)
321 {
322         unsigned long flags;
323         struct bio_prison *prison = cell->prison;
324
325         spin_lock_irqsave(&prison->lock, flags);
326         __cell_release_singleton(cell, bio);
327         spin_unlock_irqrestore(&prison->lock, flags);
328 }
329
330 /*
331  * Sometimes we don't want the holder, just the additional bios.
332  */
333 static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
334 {
335         struct bio_prison *prison = cell->prison;
336
337         hlist_del(&cell->list);
338         bio_list_merge(inmates, &cell->bios);
339
340         mempool_free(cell, prison->cell_pool);
341 }
342
343 static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
344 {
345         unsigned long flags;
346         struct bio_prison *prison = cell->prison;
347
348         spin_lock_irqsave(&prison->lock, flags);
349         __cell_release_no_holder(cell, inmates);
350         spin_unlock_irqrestore(&prison->lock, flags);
351 }
352
353 static void cell_error(struct cell *cell)
354 {
355         struct bio_prison *prison = cell->prison;
356         struct bio_list bios;
357         struct bio *bio;
358         unsigned long flags;
359
360         bio_list_init(&bios);
361
362         spin_lock_irqsave(&prison->lock, flags);
363         __cell_release(cell, &bios);
364         spin_unlock_irqrestore(&prison->lock, flags);
365
366         while ((bio = bio_list_pop(&bios)))
367                 bio_io_error(bio);
368 }
369
370 /*----------------------------------------------------------------*/
371
372 /*
373  * We use the deferred set to keep track of pending reads to shared blocks.
374  * We do this to ensure the new mapping caused by a write isn't performed
375  * until these prior reads have completed.  Otherwise the insertion of the
376  * new mapping could free the old block that the read bios are mapped to.
377  */
378
379 struct deferred_set;
380 struct deferred_entry {
381         struct deferred_set *ds;
382         unsigned count;
383         struct list_head work_items;
384 };
385
386 struct deferred_set {
387         spinlock_t lock;
388         unsigned current_entry;
389         unsigned sweeper;
390         struct deferred_entry entries[DEFERRED_SET_SIZE];
391 };
392
393 static void ds_init(struct deferred_set *ds)
394 {
395         int i;
396
397         spin_lock_init(&ds->lock);
398         ds->current_entry = 0;
399         ds->sweeper = 0;
400         for (i = 0; i < DEFERRED_SET_SIZE; i++) {
401                 ds->entries[i].ds = ds;
402                 ds->entries[i].count = 0;
403                 INIT_LIST_HEAD(&ds->entries[i].work_items);
404         }
405 }
406
407 static struct deferred_entry *ds_inc(struct deferred_set *ds)
408 {
409         unsigned long flags;
410         struct deferred_entry *entry;
411
412         spin_lock_irqsave(&ds->lock, flags);
413         entry = ds->entries + ds->current_entry;
414         entry->count++;
415         spin_unlock_irqrestore(&ds->lock, flags);
416
417         return entry;
418 }
419
420 static unsigned ds_next(unsigned index)
421 {
422         return (index + 1) % DEFERRED_SET_SIZE;
423 }
424
425 static void __sweep(struct deferred_set *ds, struct list_head *head)
426 {
427         while ((ds->sweeper != ds->current_entry) &&
428                !ds->entries[ds->sweeper].count) {
429                 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
430                 ds->sweeper = ds_next(ds->sweeper);
431         }
432
433         if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
434                 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
435 }
436
437 static void ds_dec(struct deferred_entry *entry, struct list_head *head)
438 {
439         unsigned long flags;
440
441         spin_lock_irqsave(&entry->ds->lock, flags);
442         BUG_ON(!entry->count);
443         --entry->count;
444         __sweep(entry->ds, head);
445         spin_unlock_irqrestore(&entry->ds->lock, flags);
446 }
447
448 /*
449  * Returns 1 if the work was deferred, 0 if it can be processed immediately.
450  */
451 static int ds_add_work(struct deferred_set *ds, struct list_head *work)
452 {
453         int r = 1;
454         unsigned long flags;
455         unsigned next_entry;
456
457         spin_lock_irqsave(&ds->lock, flags);
458         if ((ds->sweeper == ds->current_entry) &&
459             !ds->entries[ds->current_entry].count)
460                 r = 0;
461         else {
462                 list_add(work, &ds->entries[ds->current_entry].work_items);
463                 next_entry = ds_next(ds->current_entry);
464                 if (!ds->entries[next_entry].count)
465                         ds->current_entry = next_entry;
466         }
467         spin_unlock_irqrestore(&ds->lock, flags);
468
469         return r;
470 }
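/*
 * Condensed lifecycle, as used by the shared-read path below (illustrative
 * only):
 *
 *      entry = ds_inc(&pool->ds);           - a read of a shared block starts
 *      ds_add_work(&pool->ds, &m->list);    - a write wants to remap the block
 *      ds_dec(entry, &work);                - the read completes; any work
 *                                             whose readers have drained is
 *                                             moved onto the work list
 */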
471
472 /*----------------------------------------------------------------*/
473
474 /*
475  * Key building.
476  */
477 static void build_data_key(struct dm_thin_device *td,
478                            dm_block_t b, struct cell_key *key)
479 {
480         key->virtual = 0;
481         key->dev = dm_thin_dev_id(td);
482         key->block = b;
483 }
484
485 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
486                               struct cell_key *key)
487 {
488         key->virtual = 1;
489         key->dev = dm_thin_dev_id(td);
490         key->block = b;
491 }
492
493 /*----------------------------------------------------------------*/
494
495 /*
496  * A pool device ties together a metadata device and a data device.  It
497  * also provides the interface for creating and destroying internal
498  * devices.
499  */
500 struct new_mapping;
501 struct pool {
502         struct list_head list;
503         struct dm_target *ti;   /* Only set if a pool target is bound */
504
505         struct mapped_device *pool_md;
506         struct block_device *md_dev;
507         struct dm_pool_metadata *pmd;
508
509         uint32_t sectors_per_block;
510         unsigned block_shift;
511         dm_block_t offset_mask;
512         dm_block_t low_water_blocks;
513
514         unsigned zero_new_blocks:1;
515         unsigned low_water_triggered:1; /* A dm event has been sent */
516         unsigned no_free_space:1;       /* A -ENOSPC warning has been issued */
517
518         struct bio_prison *prison;
519         struct dm_kcopyd_client *copier;
520
521         struct workqueue_struct *wq;
522         struct work_struct worker;
523
524         unsigned ref_count;
525
526         spinlock_t lock;
527         struct bio_list deferred_bios;
528         struct bio_list deferred_flush_bios;
529         struct list_head prepared_mappings;
530
531         struct bio_list retry_on_resume_list;
532
533         struct deferred_set ds; /* FIXME: move to thin_c */
534
535         struct new_mapping *next_mapping;
536         mempool_t *mapping_pool;
537         mempool_t *endio_hook_pool;
538 };
539
540 /*
541  * Target context for a pool.
542  */
543 struct pool_c {
544         struct dm_target *ti;
545         struct pool *pool;
546         struct dm_dev *data_dev;
547         struct dm_dev *metadata_dev;
548         struct dm_target_callbacks callbacks;
549
550         dm_block_t low_water_blocks;
551         unsigned zero_new_blocks:1;
552 };
553
554 /*
555  * Target context for a thin.
556  */
557 struct thin_c {
558         struct dm_dev *pool_dev;
559         dm_thin_id dev_id;
560
561         struct pool *pool;
562         struct dm_thin_device *td;
563 };
564
565 /*----------------------------------------------------------------*/
566
567 /*
568  * A global list of pools that uses a struct mapped_device as a key.
569  */
570 static struct dm_thin_pool_table {
571         struct mutex mutex;
572         struct list_head pools;
573 } dm_thin_pool_table;
574
575 static void pool_table_init(void)
576 {
577         mutex_init(&dm_thin_pool_table.mutex);
578         INIT_LIST_HEAD(&dm_thin_pool_table.pools);
579 }
580
581 static void __pool_table_insert(struct pool *pool)
582 {
583         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
584         list_add(&pool->list, &dm_thin_pool_table.pools);
585 }
586
587 static void __pool_table_remove(struct pool *pool)
588 {
589         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
590         list_del(&pool->list);
591 }
592
593 static struct pool *__pool_table_lookup(struct mapped_device *md)
594 {
595         struct pool *pool = NULL, *tmp;
596
597         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
598
599         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
600                 if (tmp->pool_md == md) {
601                         pool = tmp;
602                         break;
603                 }
604         }
605
606         return pool;
607 }
608
609 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
610 {
611         struct pool *pool = NULL, *tmp;
612
613         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
614
615         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
616                 if (tmp->md_dev == md_dev) {
617                         pool = tmp;
618                         break;
619                 }
620         }
621
622         return pool;
623 }
624
625 /*----------------------------------------------------------------*/
626
627 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
628 {
629         struct bio *bio;
630         struct bio_list bios;
631
632         bio_list_init(&bios);
633         bio_list_merge(&bios, master);
634         bio_list_init(master);
635
636         while ((bio = bio_list_pop(&bios))) {
637                 if (dm_get_mapinfo(bio)->ptr == tc)
638                         bio_endio(bio, DM_ENDIO_REQUEUE);
639                 else
640                         bio_list_add(master, bio);
641         }
642 }
643
644 static void requeue_io(struct thin_c *tc)
645 {
646         struct pool *pool = tc->pool;
647         unsigned long flags;
648
649         spin_lock_irqsave(&pool->lock, flags);
650         __requeue_bio_list(tc, &pool->deferred_bios);
651         __requeue_bio_list(tc, &pool->retry_on_resume_list);
652         spin_unlock_irqrestore(&pool->lock, flags);
653 }
654
655 /*
656  * This section of code contains the logic for processing a thin device's IO.
657  * Much of the code depends on pool object resources (lists, workqueues, etc)
658  * but most is exclusively called from the thin target rather than the thin-pool
659  * target.
660  */
661
662 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
663 {
664         return bio->bi_sector >> tc->pool->block_shift;
665 }
666
667 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
668 {
669         struct pool *pool = tc->pool;
670
671         bio->bi_bdev = tc->pool_dev->bdev;
672         bio->bi_sector = (block << pool->block_shift) +
673                 (bio->bi_sector & pool->offset_mask);
674 }
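/*
 * Example of the remap arithmetic, assuming 64KiB data blocks:
 * sectors_per_block = 128, block_shift = 7 and offset_mask = 127, so a bio
 * at sector 300 lands in block 2 (300 >> 7) at offset 44 (300 & 127).
 */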
675
676 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
677                             dm_block_t block)
678 {
679         struct pool *pool = tc->pool;
680         unsigned long flags;
681
682         remap(tc, bio, block);
683
684         /*
685          * Batch together any FUA/FLUSH bios we find and then issue
686          * a single commit for them in process_deferred_bios().
687          */
688         if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
689                 spin_lock_irqsave(&pool->lock, flags);
690                 bio_list_add(&pool->deferred_flush_bios, bio);
691                 spin_unlock_irqrestore(&pool->lock, flags);
692         } else
693                 generic_make_request(bio);
694 }
695
696 /*
697  * wake_worker() is used when new work is queued and when pool_resume is
698  * ready to continue deferred IO processing.
699  */
700 static void wake_worker(struct pool *pool)
701 {
702         queue_work(pool->wq, &pool->worker);
703 }
704
705 /*----------------------------------------------------------------*/
706
707 /*
708  * Bio endio functions.
709  */
710 struct endio_hook {
711         struct thin_c *tc;
712         bio_end_io_t *saved_bi_end_io;
713         struct deferred_entry *entry;
714 };
715
716 struct new_mapping {
717         struct list_head list;
718
719         int prepared;
720
721         struct thin_c *tc;
722         dm_block_t virt_block;
723         dm_block_t data_block;
724         struct cell *cell;
725         int err;
726
727         /*
728          * If the bio covers the whole area of a block then we can avoid
729          * zeroing or copying.  Instead this bio is hooked.  The bio will
730          * still be in the cell, so care has to be taken to avoid issuing
731          * the bio twice.
732          */
733         struct bio *bio;
734         bio_end_io_t *saved_bi_end_io;
735 };
736
737 static void __maybe_add_mapping(struct new_mapping *m)
738 {
739         struct pool *pool = m->tc->pool;
740
741         if (list_empty(&m->list) && m->prepared) {
742                 list_add(&m->list, &pool->prepared_mappings);
743                 wake_worker(pool);
744         }
745 }
746
747 static void copy_complete(int read_err, unsigned long write_err, void *context)
748 {
749         unsigned long flags;
750         struct new_mapping *m = context;
751         struct pool *pool = m->tc->pool;
752
753         m->err = read_err || write_err ? -EIO : 0;
754
755         spin_lock_irqsave(&pool->lock, flags);
756         m->prepared = 1;
757         __maybe_add_mapping(m);
758         spin_unlock_irqrestore(&pool->lock, flags);
759 }
760
761 static void overwrite_endio(struct bio *bio, int err)
762 {
763         unsigned long flags;
764         struct new_mapping *m = dm_get_mapinfo(bio)->ptr;
765         struct pool *pool = m->tc->pool;
766
767         m->err = err;
768
769         spin_lock_irqsave(&pool->lock, flags);
770         m->prepared = 1;
771         __maybe_add_mapping(m);
772         spin_unlock_irqrestore(&pool->lock, flags);
773 }
774
775 static void shared_read_endio(struct bio *bio, int err)
776 {
777         struct list_head mappings;
778         struct new_mapping *m, *tmp;
779         struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
780         unsigned long flags;
781         struct pool *pool = h->tc->pool;
782
783         bio->bi_end_io = h->saved_bi_end_io;
784         bio_endio(bio, err);
785
786         INIT_LIST_HEAD(&mappings);
787         ds_dec(h->entry, &mappings);
788
789         spin_lock_irqsave(&pool->lock, flags);
790         list_for_each_entry_safe(m, tmp, &mappings, list) {
791                 list_del(&m->list);
792                 INIT_LIST_HEAD(&m->list);
793                 __maybe_add_mapping(m);
794         }
795         spin_unlock_irqrestore(&pool->lock, flags);
796
797         mempool_free(h, pool->endio_hook_pool);
798 }
799
800 /*----------------------------------------------------------------*/
801
802 /*
803  * Workqueue.
804  */
805
806 /*
807  * Prepared mapping jobs.
808  */
809
810 /*
811  * This sends the bios in the cell back to the deferred_bios list.
812  */
813 static void cell_defer(struct thin_c *tc, struct cell *cell,
814                        dm_block_t data_block)
815 {
816         struct pool *pool = tc->pool;
817         unsigned long flags;
818
819         spin_lock_irqsave(&pool->lock, flags);
820         cell_release(cell, &pool->deferred_bios);
821         spin_unlock_irqrestore(&pool->lock, flags);
822
823         wake_worker(pool);
824 }
825
826 /*
827  * Same as cell_defer above, except it omits one particular detainee,
828  * a write bio that covers the block and has already been processed.
829  */
830 static void cell_defer_except(struct thin_c *tc, struct cell *cell)
831 {
832         struct pool *pool = tc->pool;
833         unsigned long flags;
834
835         spin_lock_irqsave(&pool->lock, flags);
836         cell_release_no_holder(cell, &pool->deferred_bios);
837         spin_unlock_irqrestore(&pool->lock, flags);
838
839         wake_worker(pool);
840 }
844
845 static void process_prepared_mapping(struct new_mapping *m)
846 {
847         struct thin_c *tc = m->tc;
848         struct bio *bio;
849         int r;
850
851         bio = m->bio;
852         if (bio)
853                 bio->bi_end_io = m->saved_bi_end_io;
854
855         if (m->err) {
856                 cell_error(m->cell);
857                 return;
858         }
859
860         /*
861          * Commit the prepared block into the mapping btree.
862          * Any I/O for this block arriving after this point will get
863          * remapped to it directly.
864          */
865         r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
866         if (r) {
867                 DMERR("dm_thin_insert_block() failed");
868                 cell_error(m->cell);
869                 return;
870         }
871
872         /*
873          * Release any bios held while the block was being provisioned.
874          * If we are processing a write bio that completely covers the block,
875          * we already processed it so can ignore it now when processing
876          * the bios in the cell.
877          */
878         if (bio) {
879                 cell_defer_except(tc, m->cell);
880                 bio_endio(bio, 0);
881         } else
882                 cell_defer(tc, m->cell, m->data_block);
883
884         list_del(&m->list);
885         mempool_free(m, tc->pool->mapping_pool);
886 }
887
888 static void process_prepared_mappings(struct pool *pool)
889 {
890         unsigned long flags;
891         struct list_head maps;
892         struct new_mapping *m, *tmp;
893
894         INIT_LIST_HEAD(&maps);
895         spin_lock_irqsave(&pool->lock, flags);
896         list_splice_init(&pool->prepared_mappings, &maps);
897         spin_unlock_irqrestore(&pool->lock, flags);
898
899         list_for_each_entry_safe(m, tmp, &maps, list)
900                 process_prepared_mapping(m);
901 }
902
903 /*
904  * Deferred bio jobs.
905  */
906 static int io_overwrites_block(struct pool *pool, struct bio *bio)
907 {
908         return ((bio_data_dir(bio) == WRITE) &&
909                 !(bio->bi_sector & pool->offset_mask)) &&
910                 (bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
911 }
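/*
 * I.e. only a WRITE that starts exactly on a block boundary and spans the
 * whole block (sectors_per_block << SECTOR_SHIFT bytes) qualifies; anything
 * smaller or misaligned takes the copy/zero path instead.
 */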
912
913 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
914                                bio_end_io_t *fn)
915 {
916         *save = bio->bi_end_io;
917         bio->bi_end_io = fn;
918 }
919
920 static int ensure_next_mapping(struct pool *pool)
921 {
922         if (pool->next_mapping)
923                 return 0;
924
925         pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
926
927         return pool->next_mapping ? 0 : -ENOMEM;
928 }
929
930 static struct new_mapping *get_next_mapping(struct pool *pool)
931 {
932         struct new_mapping *r = pool->next_mapping;
933
934         BUG_ON(!pool->next_mapping);
935
936         pool->next_mapping = NULL;
937
938         return r;
939 }
940
941 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
942                           dm_block_t data_origin, dm_block_t data_dest,
943                           struct cell *cell, struct bio *bio)
944 {
945         int r;
946         struct pool *pool = tc->pool;
947         struct new_mapping *m = get_next_mapping(pool);
948
949         INIT_LIST_HEAD(&m->list);
950         m->prepared = 0;
951         m->tc = tc;
952         m->virt_block = virt_block;
953         m->data_block = data_dest;
954         m->cell = cell;
955         m->err = 0;
956         m->bio = NULL;
957
958         ds_add_work(&pool->ds, &m->list);
959
960         /*
961          * IO to pool_dev remaps to the pool target's data_dev.
962          *
963          * If the whole block of data is being overwritten, we can issue the
964          * bio immediately. Otherwise we use kcopyd to clone the data first.
965          */
966         if (io_overwrites_block(pool, bio)) {
967                 m->bio = bio;
968                 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
969                 dm_get_mapinfo(bio)->ptr = m;
970                 remap_and_issue(tc, bio, data_dest);
971         } else {
972                 struct dm_io_region from, to;
973
974                 from.bdev = tc->pool_dev->bdev;
975                 from.sector = data_origin * pool->sectors_per_block;
976                 from.count = pool->sectors_per_block;
977
978                 to.bdev = tc->pool_dev->bdev;
979                 to.sector = data_dest * pool->sectors_per_block;
980                 to.count = pool->sectors_per_block;
981
982                 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
983                                    0, copy_complete, m);
984                 if (r < 0) {
985                         mempool_free(m, pool->mapping_pool);
986                         DMERR("dm_kcopyd_copy() failed");
987                         cell_error(cell);
988                 }
989         }
990 }
991
992 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
993                           dm_block_t data_block, struct cell *cell,
994                           struct bio *bio)
995 {
996         struct pool *pool = tc->pool;
997         struct new_mapping *m = get_next_mapping(pool);
998
999         INIT_LIST_HEAD(&m->list);
1000         m->prepared = 0;
1001         m->tc = tc;
1002         m->virt_block = virt_block;
1003         m->data_block = data_block;
1004         m->cell = cell;
1005         m->err = 0;
1006         m->bio = NULL;
1007
1008         /*
1009          * If the whole block of data is being overwritten or we are not
1010          * zeroing pre-existing data, we can issue the bio immediately.
1011          * Otherwise we use kcopyd to zero the data first.
1012          */
1013         if (!pool->zero_new_blocks)
1014                 process_prepared_mapping(m);
1015
1016         else if (io_overwrites_block(pool, bio)) {
1017                 m->bio = bio;
1018                 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1019                 dm_get_mapinfo(bio)->ptr = m;
1020                 remap_and_issue(tc, bio, data_block);
1021
1022         } else {
1023                 int r;
1024                 struct dm_io_region to;
1025
1026                 to.bdev = tc->pool_dev->bdev;
1027                 to.sector = data_block * pool->sectors_per_block;
1028                 to.count = pool->sectors_per_block;
1029
1030                 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
1031                 if (r < 0) {
1032                         mempool_free(m, pool->mapping_pool);
1033                         DMERR("dm_kcopyd_zero() failed");
1034                         cell_error(cell);
1035                 }
1036         }
1037 }
1038
1039 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1040 {
1041         int r;
1042         dm_block_t free_blocks;
1043         unsigned long flags;
1044         struct pool *pool = tc->pool;
1045
1046         r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1047         if (r)
1048                 return r;
1049
1050         if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
1051                 DMWARN("%s: reached low water mark, sending event.",
1052                        dm_device_name(pool->pool_md));
1053                 spin_lock_irqsave(&pool->lock, flags);
1054                 pool->low_water_triggered = 1;
1055                 spin_unlock_irqrestore(&pool->lock, flags);
1056                 dm_table_event(pool->ti->table);
1057         }
1058
1059         if (!free_blocks) {
1060                 if (pool->no_free_space)
1061                         return -ENOSPC;
1062                 else {
1063                         /*
1064                          * Try to commit to see if that will free up some
1065                          * more space.
1066                          */
1067                         r = dm_pool_commit_metadata(pool->pmd);
1068                         if (r) {
1069                                 DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1070                                       __func__, r);
1071                                 return r;
1072                         }
1073
1074                         r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1075                         if (r)
1076                                 return r;
1077
1078                         /*
1079                          * If we still have no space we set a flag to avoid
1080                          * doing all this checking and return -ENOSPC.
1081                          */
1082                         if (!free_blocks) {
1083                                 DMWARN("%s: no free space available.",
1084                                        dm_device_name(pool->pool_md));
1085                                 spin_lock_irqsave(&pool->lock, flags);
1086                                 pool->no_free_space = 1;
1087                                 spin_unlock_irqrestore(&pool->lock, flags);
1088                                 return -ENOSPC;
1089                         }
1090                 }
1091         }
1092
1093         r = dm_pool_alloc_data_block(pool->pmd, result);
1094         if (r)
1095                 return r;
1096
1097         return 0;
1098 }
1099
1100 /*
1101  * If we have run out of space, queue bios until the device is
1102  * resumed, presumably after having been reloaded with more space.
1103  */
1104 static void retry_on_resume(struct bio *bio)
1105 {
1106         struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
1107         struct pool *pool = tc->pool;
1108         unsigned long flags;
1109
1110         spin_lock_irqsave(&pool->lock, flags);
1111         bio_list_add(&pool->retry_on_resume_list, bio);
1112         spin_unlock_irqrestore(&pool->lock, flags);
1113 }
1114
1115 static void no_space(struct cell *cell)
1116 {
1117         struct bio *bio;
1118         struct bio_list bios;
1119
1120         bio_list_init(&bios);
1121         cell_release(cell, &bios);
1122
1123         while ((bio = bio_list_pop(&bios)))
1124                 retry_on_resume(bio);
1125 }
1126
1127 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1128                           struct cell_key *key,
1129                           struct dm_thin_lookup_result *lookup_result,
1130                           struct cell *cell)
1131 {
1132         int r;
1133         dm_block_t data_block;
1134
1135         r = alloc_data_block(tc, &data_block);
1136         switch (r) {
1137         case 0:
1138                 schedule_copy(tc, block, lookup_result->block,
1139                               data_block, cell, bio);
1140                 break;
1141
1142         case -ENOSPC:
1143                 no_space(cell);
1144                 break;
1145
1146         default:
1147                 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1148                 cell_error(cell);
1149                 break;
1150         }
1151 }
1152
1153 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1154                                dm_block_t block,
1155                                struct dm_thin_lookup_result *lookup_result)
1156 {
1157         struct cell *cell;
1158         struct pool *pool = tc->pool;
1159         struct cell_key key;
1160
1161         /*
1162          * If cell is already occupied, then sharing is already in the process
1163          * of being broken so we have nothing further to do here.
1164          */
1165         build_data_key(tc->td, lookup_result->block, &key);
1166         if (bio_detain(pool->prison, &key, bio, &cell))
1167                 return;
1168
1169         if (bio_data_dir(bio) == WRITE)
1170                 break_sharing(tc, bio, block, &key, lookup_result, cell);
1171         else {
1172                 struct endio_hook *h;
1173                 h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
1174
1175                 h->tc = tc;
1176                 h->entry = ds_inc(&pool->ds);
1177                 save_and_set_endio(bio, &h->saved_bi_end_io, shared_read_endio);
1178                 dm_get_mapinfo(bio)->ptr = h;
1179
1180                 cell_release_singleton(cell, bio);
1181                 remap_and_issue(tc, bio, lookup_result->block);
1182         }
1183 }
1184
1185 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1186                             struct cell *cell)
1187 {
1188         int r;
1189         dm_block_t data_block;
1190
1191         /*
1192          * Remap empty bios (flushes) immediately, without provisioning.
1193          */
1194         if (!bio->bi_size) {
1195                 cell_release_singleton(cell, bio);
1196                 remap_and_issue(tc, bio, 0);
1197                 return;
1198         }
1199
1200         /*
1201          * Fill read bios with zeroes and complete them immediately.
1202          */
1203         if (bio_data_dir(bio) == READ) {
1204                 zero_fill_bio(bio);
1205                 cell_release_singleton(cell, bio);
1206                 bio_endio(bio, 0);
1207                 return;
1208         }
1209
1210         r = alloc_data_block(tc, &data_block);
1211         switch (r) {
1212         case 0:
1213                 schedule_zero(tc, block, data_block, cell, bio);
1214                 break;
1215
1216         case -ENOSPC:
1217                 no_space(cell);
1218                 break;
1219
1220         default:
1221                 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1222                 cell_error(cell);
1223                 break;
1224         }
1225 }
1226
1227 static void process_bio(struct thin_c *tc, struct bio *bio)
1228 {
1229         int r;
1230         dm_block_t block = get_bio_block(tc, bio);
1231         struct cell *cell;
1232         struct cell_key key;
1233         struct dm_thin_lookup_result lookup_result;
1234
1235         /*
1236          * If cell is already occupied, then the block is already
1237          * being provisioned so we have nothing further to do here.
1238          */
1239         build_virtual_key(tc->td, block, &key);
1240         if (bio_detain(tc->pool->prison, &key, bio, &cell))
1241                 return;
1242
1243         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1244         switch (r) {
1245         case 0:
1246                 /*
1247                  * We can release this cell now.  This thread is the only
1248                  * one that puts bios into a cell, and we know there were
1249                  * no preceding bios.
1250                  */
1251                 /*
1252                  * TODO: this will probably have to change when discard goes
1253                  * back in.
1254                  */
1255                 cell_release_singleton(cell, bio);
1256
1257                 if (lookup_result.shared)
1258                         process_shared_bio(tc, bio, block, &lookup_result);
1259                 else
1260                         remap_and_issue(tc, bio, lookup_result.block);
1261                 break;
1262
1263         case -ENODATA:
1264                 provision_block(tc, bio, block, cell);
1265                 break;
1266
1267         default:
1268                 DMERR("dm_thin_find_block() failed, error = %d", r);
1269                 bio_io_error(bio);
1270                 break;
1271         }
1272 }
1273
1274 static void process_deferred_bios(struct pool *pool)
1275 {
1276         unsigned long flags;
1277         struct bio *bio;
1278         struct bio_list bios;
1279         int r;
1280
1281         bio_list_init(&bios);
1282
1283         spin_lock_irqsave(&pool->lock, flags);
1284         bio_list_merge(&bios, &pool->deferred_bios);
1285         bio_list_init(&pool->deferred_bios);
1286         spin_unlock_irqrestore(&pool->lock, flags);
1287
1288         while ((bio = bio_list_pop(&bios))) {
1289                 struct thin_c *tc = dm_get_mapinfo(bio)->ptr;
1290                 /*
1291                  * If we've got no free new_mapping structs, and processing
1292                  * this bio might require one, we pause until there are some
1293                  * prepared mappings to process.
1294                  */
1295                 if (ensure_next_mapping(pool)) {
1296                         spin_lock_irqsave(&pool->lock, flags);
1297                         bio_list_add(&pool->deferred_bios, bio);
1298                         bio_list_merge(&pool->deferred_bios, &bios);
1299                         spin_unlock_irqrestore(&pool->lock, flags);
1300                         break;
1301                 }
1302                 process_bio(tc, bio);
1303         }
1304
1305         /*
1306          * If there are any deferred flush bios, we must commit
1307          * the metadata before issuing them.
1308          */
1309         bio_list_init(&bios);
1310         spin_lock_irqsave(&pool->lock, flags);
1311         bio_list_merge(&bios, &pool->deferred_flush_bios);
1312         bio_list_init(&pool->deferred_flush_bios);
1313         spin_unlock_irqrestore(&pool->lock, flags);
1314
1315         if (bio_list_empty(&bios))
1316                 return;
1317
1318         r = dm_pool_commit_metadata(pool->pmd);
1319         if (r) {
1320                 DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1321                       __func__, r);
1322                 while ((bio = bio_list_pop(&bios)))
1323                         bio_io_error(bio);
1324                 return;
1325         }
1326
1327         while ((bio = bio_list_pop(&bios)))
1328                 generic_make_request(bio);
1329 }
1330
1331 static void do_worker(struct work_struct *ws)
1332 {
1333         struct pool *pool = container_of(ws, struct pool, worker);
1334
1335         process_prepared_mappings(pool);
1336         process_deferred_bios(pool);
1337 }
1338
1339 /*----------------------------------------------------------------*/
1340
1341 /*
1342  * Mapping functions.
1343  */
1344
1345 /*
1346  * Called only while mapping a thin bio to hand it over to the workqueue.
1347  */
1348 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1349 {
1350         unsigned long flags;
1351         struct pool *pool = tc->pool;
1352
1353         spin_lock_irqsave(&pool->lock, flags);
1354         bio_list_add(&pool->deferred_bios, bio);
1355         spin_unlock_irqrestore(&pool->lock, flags);
1356
1357         wake_worker(pool);
1358 }
1359
1360 /*
1361  * Non-blocking function called from the thin target's map function.
1362  */
1363 static int thin_bio_map(struct dm_target *ti, struct bio *bio,
1364                         union map_info *map_context)
1365 {
1366         int r;
1367         struct thin_c *tc = ti->private;
1368         dm_block_t block = get_bio_block(tc, bio);
1369         struct dm_thin_device *td = tc->td;
1370         struct dm_thin_lookup_result result;
1371
1372         /*
1373          * Save the thin context for easy access from the deferred bio later.
1374          */
1375         map_context->ptr = tc;
1376
1377         if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
1378                 thin_defer_bio(tc, bio);
1379                 return DM_MAPIO_SUBMITTED;
1380         }
1381
1382         r = dm_thin_find_block(td, block, 0, &result);
1383
1384         /*
1385          * Note that we defer readahead too.
1386          */
1387         switch (r) {
1388         case 0:
1389                 if (unlikely(result.shared)) {
1390                         /*
1391                          * We have a race condition here between the
1392                          * result.shared value returned by the lookup and
1393                          * snapshot creation, which may cause new
1394                          * sharing.
1395                          *
1396                          * To avoid this always quiesce the origin before
1397                          * taking the snap.  You want to do this anyway to
1398                          * ensure a consistent application view
1399                          * (i.e. lockfs).
1400                          *
1401                          * More distant ancestors are irrelevant. The
1402                          * shared flag will be set in their case.
1403                          */
1404                         thin_defer_bio(tc, bio);
1405                         r = DM_MAPIO_SUBMITTED;
1406                 } else {
1407                         remap(tc, bio, result.block);
1408                         r = DM_MAPIO_REMAPPED;
1409                 }
1410                 break;
1411
1412         case -ENODATA:
1413                 /*
1414                  * In future, the failed dm_thin_find_block above could
1415                  * provide the hint to load the metadata into cache.
1416                  */
1417         case -EWOULDBLOCK:
1418                 thin_defer_bio(tc, bio);
1419                 r = DM_MAPIO_SUBMITTED;
1420                 break;
1421         }
1422
1423         return r;
1424 }
1425
1426 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1427 {
1428         int r;
1429         unsigned long flags;
1430         struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1431
1432         spin_lock_irqsave(&pt->pool->lock, flags);
1433         r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1434         spin_unlock_irqrestore(&pt->pool->lock, flags);
1435
1436         if (!r) {
1437                 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1438                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
1439         }
1440
1441         return r;
1442 }
1443
1444 static void __requeue_bios(struct pool *pool)
1445 {
1446         bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1447         bio_list_init(&pool->retry_on_resume_list);
1448 }
1449
1450 /*----------------------------------------------------------------
1451  * Binding of control targets to a pool object
1452  *--------------------------------------------------------------*/
1453 static int bind_control_target(struct pool *pool, struct dm_target *ti)
1454 {
1455         struct pool_c *pt = ti->private;
1456
1457         pool->ti = ti;
1458         pool->low_water_blocks = pt->low_water_blocks;
1459         pool->zero_new_blocks = pt->zero_new_blocks;
1460
1461         return 0;
1462 }
1463
1464 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1465 {
1466         if (pool->ti == ti)
1467                 pool->ti = NULL;
1468 }
1469
1470 /*----------------------------------------------------------------
1471  * Pool creation
1472  *--------------------------------------------------------------*/
1473 static void __pool_destroy(struct pool *pool)
1474 {
1475         __pool_table_remove(pool);
1476
1477         if (dm_pool_metadata_close(pool->pmd) < 0)
1478                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1479
1480         prison_destroy(pool->prison);
1481         dm_kcopyd_client_destroy(pool->copier);
1482
1483         if (pool->wq)
1484                 destroy_workqueue(pool->wq);
1485
1486         if (pool->next_mapping)
1487                 mempool_free(pool->next_mapping, pool->mapping_pool);
1488         mempool_destroy(pool->mapping_pool);
1489         mempool_destroy(pool->endio_hook_pool);
1490         kfree(pool);
1491 }
1492
1493 static struct pool *pool_create(struct mapped_device *pool_md,
1494                                 struct block_device *metadata_dev,
1495                                 unsigned long block_size, char **error)
1496 {
1497         int r;
1498         void *err_p;
1499         struct pool *pool;
1500         struct dm_pool_metadata *pmd;
1501
1502         pmd = dm_pool_metadata_open(metadata_dev, block_size);
1503         if (IS_ERR(pmd)) {
1504                 *error = "Error creating metadata object";
1505                 return (struct pool *)pmd;
1506         }
1507
1508         pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1509         if (!pool) {
1510                 *error = "Error allocating memory for pool";
1511                 err_p = ERR_PTR(-ENOMEM);
1512                 goto bad_pool;
1513         }
1514
1515         pool->pmd = pmd;
1516         pool->sectors_per_block = block_size;
1517         pool->block_shift = ffs(block_size) - 1;
1518         pool->offset_mask = block_size - 1;
1519         pool->low_water_blocks = 0;
1520         pool->zero_new_blocks = 1;
1521         pool->prison = prison_create(PRISON_CELLS);
1522         if (!pool->prison) {
1523                 *error = "Error creating pool's bio prison";
1524                 err_p = ERR_PTR(-ENOMEM);
1525                 goto bad_prison;
1526         }
1527
1528         pool->copier = dm_kcopyd_client_create();
1529         if (IS_ERR(pool->copier)) {
1530                 r = PTR_ERR(pool->copier);
1531                 *error = "Error creating pool's kcopyd client";
1532                 err_p = ERR_PTR(r);
1533                 goto bad_kcopyd_client;
1534         }
1535
1536         /*
1537          * Create singlethreaded workqueue that will service all devices
1538          * that use this metadata.
1539          */
1540         pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1541         if (!pool->wq) {
1542                 *error = "Error creating pool's workqueue";
1543                 err_p = ERR_PTR(-ENOMEM);
1544                 goto bad_wq;
1545         }
1546
1547         INIT_WORK(&pool->worker, do_worker);
1548         spin_lock_init(&pool->lock);
1549         bio_list_init(&pool->deferred_bios);
1550         bio_list_init(&pool->deferred_flush_bios);
1551         INIT_LIST_HEAD(&pool->prepared_mappings);
1552         pool->low_water_triggered = 0;
1553         pool->no_free_space = 0;
1554         bio_list_init(&pool->retry_on_resume_list);
1555         ds_init(&pool->ds);
1556
1557         pool->next_mapping = NULL;
1558         pool->mapping_pool =
1559                 mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
1560         if (!pool->mapping_pool) {
1561                 *error = "Error creating pool's mapping mempool";
1562                 err_p = ERR_PTR(-ENOMEM);
1563                 goto bad_mapping_pool;
1564         }
1565
1566         pool->endio_hook_pool =
1567                 mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
1568         if (!pool->endio_hook_pool) {
1569                 *error = "Error creating pool's endio_hook mempool";
1570                 err_p = ERR_PTR(-ENOMEM);
1571                 goto bad_endio_hook_pool;
1572         }
1573         pool->ref_count = 1;
1574         pool->pool_md = pool_md;
1575         pool->md_dev = metadata_dev;
1576         __pool_table_insert(pool);
1577
1578         return pool;
1579
1580 bad_endio_hook_pool:
1581         mempool_destroy(pool->mapping_pool);
1582 bad_mapping_pool:
1583         destroy_workqueue(pool->wq);
1584 bad_wq:
1585         dm_kcopyd_client_destroy(pool->copier);
1586 bad_kcopyd_client:
1587         prison_destroy(pool->prison);
1588 bad_prison:
1589         kfree(pool);
1590 bad_pool:
1591         if (dm_pool_metadata_close(pmd))
1592                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1593
1594         return err_p;
1595 }
1596
1597 static void __pool_inc(struct pool *pool)
1598 {
1599         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1600         pool->ref_count++;
1601 }
1602
1603 static void __pool_dec(struct pool *pool)
1604 {
1605         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1606         BUG_ON(!pool->ref_count);
1607         if (!--pool->ref_count)
1608                 __pool_destroy(pool);
1609 }
1610
1611 static struct pool *__pool_find(struct mapped_device *pool_md,
1612                                 struct block_device *metadata_dev,
1613                                 unsigned long block_size, char **error)
1614 {
1615         struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
1616
1617         if (pool) {
1618                 if (pool->pool_md != pool_md)
1619                         return ERR_PTR(-EBUSY);
1620                 __pool_inc(pool);
1621
1622         } else {
1623                 pool = __pool_table_lookup(pool_md);
1624                 if (pool) {
1625                         if (pool->md_dev != metadata_dev)
1626                                 return ERR_PTR(-EINVAL);
1627                         __pool_inc(pool);
1628
1629                 } else
1630                         pool = pool_create(pool_md, metadata_dev, block_size, error);
1631         }
1632
1633         return pool;
1634 }
1635
1636 /*----------------------------------------------------------------
1637  * Pool target methods
1638  *--------------------------------------------------------------*/
1639 static void pool_dtr(struct dm_target *ti)
1640 {
1641         struct pool_c *pt = ti->private;
1642
1643         mutex_lock(&dm_thin_pool_table.mutex);
1644
1645         unbind_control_target(pt->pool, ti);
1646         __pool_dec(pt->pool);
1647         dm_put_device(ti, pt->metadata_dev);
1648         dm_put_device(ti, pt->data_dev);
1649         kfree(pt);
1650
1651         mutex_unlock(&dm_thin_pool_table.mutex);
1652 }
1653
1654 struct pool_features {
1655         unsigned zero_new_blocks:1;
1656 };
1657
1658 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1659                                struct dm_target *ti)
1660 {
1661         int r;
1662         unsigned argc;
1663         const char *arg_name;
1664
1665         static struct dm_arg _args[] = {
1666                 {0, 1, "Invalid number of pool feature arguments"},
1667         };
1668
1669         /*
1670          * No feature arguments supplied.
1671          */
1672         if (!as->argc)
1673                 return 0;
1674
1675         r = dm_read_arg_group(_args, as, &argc, &ti->error);
1676         if (r)
1677                 return -EINVAL;
1678
1679         while (argc && !r) {
1680                 arg_name = dm_shift_arg(as);
1681                 argc--;
1682
1683                 if (!strcasecmp(arg_name, "skip_block_zeroing")) {
1684                         pf->zero_new_blocks = 0;
1685                         continue;
1686                 }
1687
1688                 ti->error = "Unrecognised pool feature requested";
1689                 r = -EINVAL;
1690         }
1691
1692         return r;
1693 }
1694
1695 /*
1696  * thin-pool <metadata dev> <data dev>
1697  *           <data block size (sectors)>
1698  *           <low water mark (blocks)>
1699  *           [<#feature args> [<arg>]*]
1700  *
1701  * Optional feature arguments are:
1702  *           skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
1703  */
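/*
 * Illustrative example only (device names and sizes are hypothetical,
 * not taken from this file): a pool with 64KiB (128-sector) data
 * blocks, a low water mark of 1024 blocks and block zeroing disabled
 * might be created with:
 *
 *   dmsetup create my_pool --table \
 *     "0 20971520 thin-pool /dev/sdb /dev/sdc 128 1024 1 skip_block_zeroing"
 */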
1704 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1705 {
1706         int r;
1707         struct pool_c *pt;
1708         struct pool *pool;
1709         struct pool_features pf;
1710         struct dm_arg_set as;
1711         struct dm_dev *data_dev;
1712         unsigned long block_size;
1713         dm_block_t low_water_blocks;
1714         struct dm_dev *metadata_dev;
1715         sector_t metadata_dev_size;
1716
1717         /*
1718          * FIXME Remove validation from scope of lock.
1719          */
1720         mutex_lock(&dm_thin_pool_table.mutex);
1721
1722         if (argc < 4) {
1723                 ti->error = "Invalid argument count";
1724                 r = -EINVAL;
1725                 goto out_unlock;
1726         }
1727         as.argc = argc;
1728         as.argv = argv;
1729
1730         r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
1731         if (r) {
1732                 ti->error = "Error opening metadata block device";
1733                 goto out_unlock;
1734         }
1735
1736         metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
1737         if (metadata_dev_size > METADATA_DEV_MAX_SECTORS) {
1738                 ti->error = "Metadata device is too large";
1739                 r = -EINVAL;
1740                 goto out_metadata;
1741         }
1742
1743         r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
1744         if (r) {
1745                 ti->error = "Error getting data device";
1746                 goto out_metadata;
1747         }
1748
1749         if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
1750             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1751             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
1752             !is_power_of_2(block_size)) {
1753                 ti->error = "Invalid block size";
1754                 r = -EINVAL;
1755                 goto out;
1756         }
1757
1758         if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
1759                 ti->error = "Invalid low water mark";
1760                 r = -EINVAL;
1761                 goto out;
1762         }
1763
1764         /*
1765          * Set default pool features.
1766          */
1767         memset(&pf, 0, sizeof(pf));
1768         pf.zero_new_blocks = 1;
1769
1770         dm_consume_args(&as, 4);
1771         r = parse_pool_features(&as, &pf, ti);
1772         if (r)
1773                 goto out;
1774
1775         pt = kzalloc(sizeof(*pt), GFP_KERNEL);
1776         if (!pt) {
1777                 r = -ENOMEM;
1778                 goto out;
1779         }
1780
1781         pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
1782                            block_size, &ti->error);
1783         if (IS_ERR(pool)) {
1784                 r = PTR_ERR(pool);
1785                 goto out_free_pt;
1786         }
1787
1788         pt->pool = pool;
1789         pt->ti = ti;
1790         pt->metadata_dev = metadata_dev;
1791         pt->data_dev = data_dev;
1792         pt->low_water_blocks = low_water_blocks;
1793         pt->zero_new_blocks = pf.zero_new_blocks;
1794         ti->num_flush_requests = 1;
1795         ti->num_discard_requests = 0;
1796         ti->private = pt;
1797
1798         pt->callbacks.congested_fn = pool_is_congested;
1799         dm_table_add_target_callbacks(ti->table, &pt->callbacks);
1800
1801         mutex_unlock(&dm_thin_pool_table.mutex);
1802
1803         return 0;
1804
1805 out_free_pt:
1806         kfree(pt);
1807 out:
1808         dm_put_device(ti, data_dev);
1809 out_metadata:
1810         dm_put_device(ti, metadata_dev);
1811 out_unlock:
1812         mutex_unlock(&dm_thin_pool_table.mutex);
1813
1814         return r;
1815 }
1816
1817 static int pool_map(struct dm_target *ti, struct bio *bio,
1818                     union map_info *map_context)
1819 {
1820         int r;
1821         struct pool_c *pt = ti->private;
1822         struct pool *pool = pt->pool;
1823         unsigned long flags;
1824
1825         /*
1826          * As this is a singleton target, ti->begin is always zero.
1827          */
1828         spin_lock_irqsave(&pool->lock, flags);
1829         bio->bi_bdev = pt->data_dev->bdev;
1830         r = DM_MAPIO_REMAPPED;
1831         spin_unlock_irqrestore(&pool->lock, flags);
1832
1833         return r;
1834 }
1835
1836 /*
1837  * Retrieves the number of blocks of the data device from
1838  * the superblock and compares it to the actual device size,
1839  * thus extending the pool's space map if the device has grown.
1840  *
1841  * This copes both with opening a preallocated data device in the ctr,
1842  * followed by a resume,
1843  * -and-
1844  * with calling the resume method on its own after userspace has
1845  * grown the data device in reaction to a table event.
1846  */
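/*
 * A sketch of the second case, assuming hypothetical device names and
 * that userspace has just grown the underlying data device:
 *
 *   dmsetup suspend my_pool
 *   dmsetup reload my_pool --table "<table with the new, larger length>"
 *   dmsetup resume my_pool	# preresume spots the growth and resizes
 */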
1847 static int pool_preresume(struct dm_target *ti)
1848 {
1849         int r;
1850         struct pool_c *pt = ti->private;
1851         struct pool *pool = pt->pool;
1852         dm_block_t data_size, sb_data_size;
1853
1854         /*
1855          * Take control of the pool object.
1856          */
1857         r = bind_control_target(pool, ti);
1858         if (r)
1859                 return r;
1860
1861         data_size = ti->len >> pool->block_shift;
1862         r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
1863         if (r) {
1864                 DMERR("failed to retrieve data device size");
1865                 return r;
1866         }
1867
1868         if (data_size < sb_data_size) {
1869                 DMERR("pool target too small, is %llu blocks (expected %llu)",
1870                       (unsigned long long)data_size, (unsigned long long)sb_data_size);
1871                 return -EINVAL;
1872
1873         } else if (data_size > sb_data_size) {
1874                 r = dm_pool_resize_data_dev(pool->pmd, data_size);
1875                 if (r) {
1876                         DMERR("failed to resize data device");
1877                         return r;
1878                 }
1879
1880                 r = dm_pool_commit_metadata(pool->pmd);
1881                 if (r) {
1882                         DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1883                               __func__, r);
1884                         return r;
1885                 }
1886         }
1887
1888         return 0;
1889 }
1890
1891 static void pool_resume(struct dm_target *ti)
1892 {
1893         struct pool_c *pt = ti->private;
1894         struct pool *pool = pt->pool;
1895         unsigned long flags;
1896
1897         spin_lock_irqsave(&pool->lock, flags);
1898         pool->low_water_triggered = 0;
1899         pool->no_free_space = 0;
1900         __requeue_bios(pool);
1901         spin_unlock_irqrestore(&pool->lock, flags);
1902
1903         wake_worker(pool);
1904 }
1905
1906 static void pool_postsuspend(struct dm_target *ti)
1907 {
1908         int r;
1909         struct pool_c *pt = ti->private;
1910         struct pool *pool = pt->pool;
1911
1912         flush_workqueue(pool->wq);
1913
1914         r = dm_pool_commit_metadata(pool->pmd);
1915         if (r < 0) {
1916                 DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
1917                       __func__, r);
1918                 /* FIXME: invalidate device? error the next FUA or FLUSH bio? */
1919         }
1920 }
1921
1922 static int check_arg_count(unsigned argc, unsigned args_required)
1923 {
1924         if (argc != args_required) {
1925                 DMWARN("Message received with %u arguments instead of %u.",
1926                        argc, args_required);
1927                 return -EINVAL;
1928         }
1929
1930         return 0;
1931 }
1932
1933 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
1934 {
1935         if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
1936             *dev_id <= MAX_DEV_ID)
1937                 return 0;
1938
1939         if (warning)
1940                 DMWARN("Message received with invalid device id: %s", arg);
1941
1942         return -EINVAL;
1943 }
1944
1945 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
1946 {
1947         dm_thin_id dev_id;
1948         int r;
1949
1950         r = check_arg_count(argc, 2);
1951         if (r)
1952                 return r;
1953
1954         r = read_dev_id(argv[1], &dev_id, 1);
1955         if (r)
1956                 return r;
1957
1958         r = dm_pool_create_thin(pool->pmd, dev_id);
1959         if (r) {
1960                 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
1961                        argv[1]);
1962                 return r;
1963         }
1964
1965         return 0;
1966 }
1967
1968 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
1969 {
1970         dm_thin_id dev_id;
1971         dm_thin_id origin_dev_id;
1972         int r;
1973
1974         r = check_arg_count(argc, 3);
1975         if (r)
1976                 return r;
1977
1978         r = read_dev_id(argv[1], &dev_id, 1);
1979         if (r)
1980                 return r;
1981
1982         r = read_dev_id(argv[2], &origin_dev_id, 1);
1983         if (r)
1984                 return r;
1985
1986         r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
1987         if (r) {
1988                 DMWARN("Creation of new snapshot %s of device %s failed.",
1989                        argv[1], argv[2]);
1990                 return r;
1991         }
1992
1993         return 0;
1994 }
1995
1996 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
1997 {
1998         dm_thin_id dev_id;
1999         int r;
2000
2001         r = check_arg_count(argc, 2);
2002         if (r)
2003                 return r;
2004
2005         r = read_dev_id(argv[1], &dev_id, 1);
2006         if (r)
2007                 return r;
2008
2009         r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2010         if (r)
2011                 DMWARN("Deletion of thin device %s failed.", argv[1]);
2012
2013         return r;
2014 }
2015
2016 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2017 {
2018         dm_thin_id old_id, new_id;
2019         int r;
2020
2021         r = check_arg_count(argc, 3);
2022         if (r)
2023                 return r;
2024
2025         if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2026                 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2027                 return -EINVAL;
2028         }
2029
2030         if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2031                 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2032                 return -EINVAL;
2033         }
2034
2035         r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2036         if (r) {
2037                 DMWARN("Failed to change transaction id from %s to %s.",
2038                        argv[1], argv[2]);
2039                 return r;
2040         }
2041
2042         return 0;
2043 }
2044
2045 /*
2046  * Messages supported:
2047  *   create_thin        <dev_id>
2048  *   create_snap        <dev_id> <origin_id>
2049  *   delete             <dev_id>
2051  *   set_transaction_id <current_trans_id> <new_trans_id>
2052  */
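/*
 * For example (hypothetical pool device and ids):
 *
 *   dmsetup message my_pool 0 create_thin 0
 *   dmsetup message my_pool 0 create_snap 1 0
 *   dmsetup message my_pool 0 delete 1
 */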
2053 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2054 {
2055         int r = -EINVAL;
2056         struct pool_c *pt = ti->private;
2057         struct pool *pool = pt->pool;
2058
2059         if (!strcasecmp(argv[0], "create_thin"))
2060                 r = process_create_thin_mesg(argc, argv, pool);
2061
2062         else if (!strcasecmp(argv[0], "create_snap"))
2063                 r = process_create_snap_mesg(argc, argv, pool);
2064
2065         else if (!strcasecmp(argv[0], "delete"))
2066                 r = process_delete_mesg(argc, argv, pool);
2067
2068         else if (!strcasecmp(argv[0], "set_transaction_id"))
2069                 r = process_set_transaction_id_mesg(argc, argv, pool);
2070
2071         else
2072                 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2073
2074         if (!r) {
2075                 r = dm_pool_commit_metadata(pool->pmd);
2076                 if (r)
2077                         DMERR("%s message: dm_pool_commit_metadata() failed, error = %d",
2078                               argv[0], r);
2079         }
2080
2081         return r;
2082 }
2083
2084 /*
2085  * Status line is:
2086  *    <transaction id> <used metadata blocks>/<total metadata blocks>
2087  *    <used data blocks>/<total data blocks> <held metadata root>
2088  */
2089 static int pool_status(struct dm_target *ti, status_type_t type,
2090                        char *result, unsigned maxlen)
2091 {
2092         int r;
2093         unsigned sz = 0;
2094         uint64_t transaction_id;
2095         dm_block_t nr_free_blocks_data;
2096         dm_block_t nr_free_blocks_metadata;
2097         dm_block_t nr_blocks_data;
2098         dm_block_t nr_blocks_metadata;
2099         dm_block_t held_root;
2100         char buf[BDEVNAME_SIZE];
2101         char buf2[BDEVNAME_SIZE];
2102         struct pool_c *pt = ti->private;
2103         struct pool *pool = pt->pool;
2104
2105         switch (type) {
2106         case STATUSTYPE_INFO:
2107                 r = dm_pool_get_metadata_transaction_id(pool->pmd,
2108                                                         &transaction_id);
2109                 if (r)
2110                         return r;
2111
2112                 r = dm_pool_get_free_metadata_block_count(pool->pmd,
2113                                                           &nr_free_blocks_metadata);
2114                 if (r)
2115                         return r;
2116
2117                 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2118                 if (r)
2119                         return r;
2120
2121                 r = dm_pool_get_free_block_count(pool->pmd,
2122                                                  &nr_free_blocks_data);
2123                 if (r)
2124                         return r;
2125
2126                 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2127                 if (r)
2128                         return r;
2129
2130                 r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
2131                 if (r)
2132                         return r;
2133
2134                 DMEMIT("%llu %llu/%llu %llu/%llu ",
2135                        (unsigned long long)transaction_id,
2136                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2137                        (unsigned long long)nr_blocks_metadata,
2138                        (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2139                        (unsigned long long)nr_blocks_data);
2140
2141                 if (held_root)
2142                         DMEMIT("%llu", (unsigned long long)held_root);
2143                 else
2144                         DMEMIT("-");
2145
2146                 break;
2147
2148         case STATUSTYPE_TABLE:
2149                 DMEMIT("%s %s %lu %llu ",
2150                        format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2151                        format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2152                        (unsigned long)pool->sectors_per_block,
2153                        (unsigned long long)pt->low_water_blocks);
2154
2155                 DMEMIT("%u ", !pool->zero_new_blocks);
2156
2157                 if (!pool->zero_new_blocks)
2158                         DMEMIT("skip_block_zeroing ");
2159                 break;
2160         }
2161
2162         return 0;
2163 }
2164
2165 static int pool_iterate_devices(struct dm_target *ti,
2166                                 iterate_devices_callout_fn fn, void *data)
2167 {
2168         struct pool_c *pt = ti->private;
2169
2170         return fn(ti, pt->data_dev, 0, ti->len, data);
2171 }
2172
2173 static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2174                       struct bio_vec *biovec, int max_size)
2175 {
2176         struct pool_c *pt = ti->private;
2177         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2178
2179         if (!q->merge_bvec_fn)
2180                 return max_size;
2181
2182         bvm->bi_bdev = pt->data_dev->bdev;
2183
2184         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2185 }
2186
2187 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2188 {
2189         struct pool_c *pt = ti->private;
2190         struct pool *pool = pt->pool;
2191
2192         blk_limits_io_min(limits, 0);
2193         blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2194 }
2195
2196 static struct target_type pool_target = {
2197         .name = "thin-pool",
2198         .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2199                     DM_TARGET_IMMUTABLE,
2200         .version = {1, 0, 0},
2201         .module = THIS_MODULE,
2202         .ctr = pool_ctr,
2203         .dtr = pool_dtr,
2204         .map = pool_map,
2205         .postsuspend = pool_postsuspend,
2206         .preresume = pool_preresume,
2207         .resume = pool_resume,
2208         .message = pool_message,
2209         .status = pool_status,
2210         .merge = pool_merge,
2211         .iterate_devices = pool_iterate_devices,
2212         .io_hints = pool_io_hints,
2213 };
2214
2215 /*----------------------------------------------------------------
2216  * Thin target methods
2217  *--------------------------------------------------------------*/
2218 static void thin_dtr(struct dm_target *ti)
2219 {
2220         struct thin_c *tc = ti->private;
2221
2222         mutex_lock(&dm_thin_pool_table.mutex);
2223
2224         __pool_dec(tc->pool);
2225         dm_pool_close_thin_device(tc->td);
2226         dm_put_device(ti, tc->pool_dev);
2227         kfree(tc);
2228
2229         mutex_unlock(&dm_thin_pool_table.mutex);
2230 }
2231
2232 /*
2233  * Thin target parameters:
2234  *
2235  * <pool_dev> <dev_id>
2236  *
2237  * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
2238  * dev_id: the internal device identifier
2239  */
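/*
 * Illustrative example (hypothetical names; the thin device must
 * already have been created with a create_thin message, and the table
 * length sets its visible size):
 *
 *   dmsetup create thin_vol --table "0 2097152 thin /dev/mapper/my_pool 0"
 */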
2240 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2241 {
2242         int r;
2243         struct thin_c *tc;
2244         struct dm_dev *pool_dev;
2245         struct mapped_device *pool_md;
2246
2247         mutex_lock(&dm_thin_pool_table.mutex);
2248
2249         if (argc != 2) {
2250                 ti->error = "Invalid argument count";
2251                 r = -EINVAL;
2252                 goto out_unlock;
2253         }
2254
2255         tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2256         if (!tc) {
2257                 ti->error = "Out of memory";
2258                 r = -ENOMEM;
2259                 goto out_unlock;
2260         }
2261
2262         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2263         if (r) {
2264                 ti->error = "Error opening pool device";
2265                 goto bad_pool_dev;
2266         }
2267         tc->pool_dev = pool_dev;
2268
2269         if (read_dev_id(argv[1], &tc->dev_id, 0)) {
2270                 ti->error = "Invalid device id";
2271                 r = -EINVAL;
2272                 goto bad_common;
2273         }
2274
2275         pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2276         if (!pool_md) {
2277                 ti->error = "Couldn't get pool mapped device";
2278                 r = -EINVAL;
2279                 goto bad_common;
2280         }
2281
2282         tc->pool = __pool_table_lookup(pool_md);
2283         if (!tc->pool) {
2284                 ti->error = "Couldn't find pool object";
2285                 r = -EINVAL;
2286                 goto bad_pool_lookup;
2287         }
2288         __pool_inc(tc->pool);
2289
2290         r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2291         if (r) {
2292                 ti->error = "Couldn't open thin internal device";
2293                 goto bad_thin_open;
2294         }
2295
2296         ti->split_io = tc->pool->sectors_per_block;
2297         ti->num_flush_requests = 1;
2298         ti->num_discard_requests = 0;
2299         ti->discards_supported = 0;
2300
2301         dm_put(pool_md);
2302
2303         mutex_unlock(&dm_thin_pool_table.mutex);
2304
2305         return 0;
2306
2307 bad_thin_open:
2308         __pool_dec(tc->pool);
2309 bad_pool_lookup:
2310         dm_put(pool_md);
2311 bad_common:
2312         dm_put_device(ti, tc->pool_dev);
2313 bad_pool_dev:
2314         kfree(tc);
2315 out_unlock:
2316         mutex_unlock(&dm_thin_pool_table.mutex);
2317
2318         return r;
2319 }
2320
2321 static int thin_map(struct dm_target *ti, struct bio *bio,
2322                     union map_info *map_context)
2323 {
2324         bio->bi_sector -= ti->begin;
2325
2326         return thin_bio_map(ti, bio, map_context);
2327 }
2328
2329 static void thin_postsuspend(struct dm_target *ti)
2330 {
2331         if (dm_noflush_suspending(ti))
2332                 requeue_io((struct thin_c *)ti->private);
2333 }
2334
2335 /*
2336  * <nr mapped sectors> <highest mapped sector>
2337  */
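/*
 * eg. with 128-sector blocks, "1048576 2097151" would report 1048576
 * mapped sectors with the highest mapped sector being 2097151 (values
 * illustrative only).
 */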
2338 static int thin_status(struct dm_target *ti, status_type_t type,
2339                        char *result, unsigned maxlen)
2340 {
2341         int r;
2342         ssize_t sz = 0;
2343         dm_block_t mapped, highest;
2344         char buf[BDEVNAME_SIZE];
2345         struct thin_c *tc = ti->private;
2346
2347         if (!tc->td)
2348                 DMEMIT("-");
2349         else {
2350                 switch (type) {
2351                 case STATUSTYPE_INFO:
2352                         r = dm_thin_get_mapped_count(tc->td, &mapped);
2353                         if (r)
2354                                 return r;
2355
2356                         r = dm_thin_get_highest_mapped_block(tc->td, &highest);
2357                         if (r < 0)
2358                                 return r;
2359
2360                         DMEMIT("%llu ", (unsigned long long)(mapped * tc->pool->sectors_per_block));
2361                         if (r)
2362                                 DMEMIT("%llu", (unsigned long long)(((highest + 1) *
2363                                                 tc->pool->sectors_per_block) - 1));
2364                         else
2365                                 DMEMIT("-");
2366                         break;
2367
2368                 case STATUSTYPE_TABLE:
2369                         DMEMIT("%s %lu",
2370                                format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
2371                                (unsigned long) tc->dev_id);
2372                         break;
2373                 }
2374         }
2375
2376         return 0;
2377 }
2378
2379 static int thin_iterate_devices(struct dm_target *ti,
2380                                 iterate_devices_callout_fn fn, void *data)
2381 {
2382         dm_block_t blocks;
2383         struct thin_c *tc = ti->private;
2384
2385         /*
2386          * We can't call dm_pool_get_data_dev_size() since that blocks.  So
2387          * we follow a more convoluted path through to the pool's target.
2388          */
2389         if (!tc->pool->ti)
2390                 return 0;       /* nothing is bound */
2391
2392         blocks = tc->pool->ti->len >> tc->pool->block_shift;
2393         if (blocks)
2394                 return fn(ti, tc->pool_dev, 0, tc->pool->sectors_per_block * blocks, data);
2395
2396         return 0;
2397 }
2398
2399 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
2400 {
2401         struct thin_c *tc = ti->private;
2402
2403         blk_limits_io_min(limits, 0);
2404         blk_limits_io_opt(limits, tc->pool->sectors_per_block << SECTOR_SHIFT);
2405 }
2406
2407 static struct target_type thin_target = {
2408         .name = "thin",
2409         .version = {1, 0, 0},
2410         .module = THIS_MODULE,
2411         .ctr = thin_ctr,
2412         .dtr = thin_dtr,
2413         .map = thin_map,
2414         .postsuspend = thin_postsuspend,
2415         .status = thin_status,
2416         .iterate_devices = thin_iterate_devices,
2417         .io_hints = thin_io_hints,
2418 };
2419
2420 /*----------------------------------------------------------------*/
2421
2422 static int __init dm_thin_init(void)
2423 {
2424         int r;
2425
2426         pool_table_init();
2427
2428         r = dm_register_target(&thin_target);
2429         if (r)
2430                 return r;
2431
2432         r = dm_register_target(&pool_target);
2433         if (r)
2434                 dm_unregister_target(&thin_target);
2435
2436         return r;
2437 }
2438
2439 static void dm_thin_exit(void)
2440 {
2441         dm_unregister_target(&thin_target);
2442         dm_unregister_target(&pool_target);
2443 }
2444
2445 module_init(dm_thin_init);
2446 module_exit(dm_thin_exit);
2447
2448 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
2449 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2450 MODULE_LICENSE("GPL");