/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE 16
#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
				  (DM_TRACKED_CHUNK_HASH_SIZE - 1))

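/*
 * An exception records the remapping of a single origin chunk to a chunk
 * on the COW device.  Each snapshot keeps two of these tables: 'complete'
 * for exceptions whose copy has finished and 'pending' for copies still
 * in flight (see struct dm_snapshot below).
 */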
struct exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;

	/* List of snapshots per Origin */
	struct list_head list;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	/* Used for display of table */
	char type;

	mempool_t *pending_pool;

	atomic_t pending_exceptions_count;

	struct exception_table pending;
	struct exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
};

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions. It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

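/*
 * Reads that are in flight against the snapshot are tracked here so that
 * pending_complete() can wait for conflicting reads on a chunk to drain
 * before committing an exception for it (see __chunk_is_tracked()).
 */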
struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s, chunk_t chunk_shift,
			    struct dm_dev *cow)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> chunk_shift;
	hash_size = min(hash_size, max_buckets);

	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary. size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}

static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  chunk_t *chunk_size, chunk_t *chunk_mask,
			  chunk_t *chunk_shift, struct dm_dev *cow,
			  char **error)
{
	unsigned long chunk_size_ulong;
	char *value;

	chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size_ulong) {
		*chunk_size = *chunk_mask = *chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size. Silently
	 * round up if it's not.
	 */
	chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size_ulong)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size_ulong % (bdev_hardsect_size(cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	*chunk_size = chunk_size_ulong;
	*chunk_mask = chunk_size_ulong - 1;
	*chunk_shift = ffs(chunk_size_ulong) - 1;

	return 0;
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;
	chunk_t chunk_size, chunk_mask, chunk_shift;
	struct dm_dev *cow;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &chunk_size, &chunk_mask, &chunk_shift,
			   cow, &ti->error);
	if (r)
		goto bad3;

	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s, chunk_shift, cow)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	r = dm_exception_store_create(argv[2], ti, chunk_size, chunk_mask,
				      chunk_shift, cow, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad6;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->store->chunk_size;

	return 0;

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad6:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad5:
	s->store->type->dtr(s->store);

bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

bad3:
	dm_put_device(ti, cow);
	dm_put_device(ti, s->origin);

bad2:
	kfree(s);

bad1:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store->type->dtr(s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_dev *cow = s->store->cow;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->store->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished. kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min(s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->store->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->store->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
					 (bio->bi_sector &
					  s->store->chunk_mask);
}

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			DMEMIT("Invalid");
		else {
			if (snap->store->type->fraction_full) {
				sector_t numerator, denominator;
				snap->store->type->fraction_full(snap->store,
								 &numerator,
								 &denominator);
				DMEMIT("%llu/%llu",
				       (unsigned long long)numerator,
				       (unsigned long long)denominator);
			}
			else
				DMEMIT("Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s", snap->origin->name);
		DMEMIT(" %s %s %llu", snap->store->cow->name,
		       snap->store->type->name,
		       (unsigned long long)snap->store->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

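/*
 * For every valid, active snapshot on the origin, queue a pending exception
 * for the chunk being overwritten (unless it has already been copied).  All
 * pending exceptions created for one origin write share a single primary_pe,
 * which holds the origin bio until every copy has completed.
 */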
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */

	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
|
|
|
|
* Origin: maps a linear range of a device, with hooks for snapshotting.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Construct an origin mapping: <dev_path>
|
|
|
|
* The context for an origin is merely a 'struct dm_dev *'
|
|
|
|
* pointing to the real device.
|
|
|
|
*/
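/*
 * For illustration only, a table line for this target has the form
 * "<start_sector> <num_sectors> snapshot-origin <dev_path>"; the single
 * <dev_path> argument is what is parsed below.
 */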
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

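/* Choose the smaller of two sizes, treating zero as "not yet set". */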
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

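	/* Single-threaded workqueue used to flush queued snapshot bios. */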
	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

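	/* Error labels unwind the initialisation steps in reverse order. */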
bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");