/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#ifndef DM_SNAPSHOT_H
#define DM_SNAPSHOT_H

#include "dm.h"
#include "dm-bio-list.h"
#include <linux/blkdev.h>
#include <linux/workqueue.h>

struct exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

/*
 * The snapshot code deals with largish chunks of the disk at a
 * time. Typically 32k - 512k.
 */
typedef sector_t chunk_t;

/*
 * An exception is used where an old chunk of data has been
 * replaced by a new one.
 * If chunk_t is 64 bits in size, the top 8 bits of new_chunk hold the number
 * of chunks that follow contiguously.  Remaining bits hold the number of the
 * chunk within the device.
 */
struct dm_snap_exception {
	struct list_head hash_list;

	chunk_t old_chunk;
	chunk_t new_chunk;
};

/*
 * Functions to manipulate consecutive chunks
 */
# if defined(CONFIG_LBD) || (BITS_PER_LONG == 64)
# define DM_CHUNK_CONSECUTIVE_BITS 8
# define DM_CHUNK_NUMBER_BITS 56

static inline chunk_t dm_chunk_number(chunk_t chunk)
{
	return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
}

static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
{
	return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}

static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
{
	e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);

	BUG_ON(!dm_consecutive_chunk_count(e));
}

# else
# define DM_CHUNK_CONSECUTIVE_BITS 0

static inline chunk_t dm_chunk_number(chunk_t chunk)
{
	return chunk;
}

static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
{
	return 0;
}

static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
{
}

# endif
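
/*
 * Illustrative example, not part of the original interface: on a 64-bit
 * chunk_t, an exception remapping old chunk 100 to new chunk 200, with two
 * further contiguous chunks following it, would be encoded as
 *
 *	e->old_chunk = 100;
 *	e->new_chunk = ((chunk_t)2 << DM_CHUNK_NUMBER_BITS) | 200;
 *
 * so that dm_chunk_number(e->new_chunk) returns 200 and
 * dm_consecutive_chunk_count(e) returns 2.
 */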

/*
 * Abstraction to handle the meta/layout of exception stores (the
 * COW device).
 */
struct exception_store {

	/*
	 * Destroys this object when you've finished with it.
	 */
	void (*destroy) (struct exception_store *store);

	/*
	 * The target shouldn't read the COW device until this is
	 * called.
	 */
	int (*read_metadata) (struct exception_store *store);

	/*
	 * Find somewhere to store the next exception.
	 */
	int (*prepare_exception) (struct exception_store *store,
				  struct dm_snap_exception *e);

	/*
	 * Update the metadata with this exception.
	 */
	void (*commit_exception) (struct exception_store *store,
				  struct dm_snap_exception *e,
				  void (*callback) (void *, int success),
				  void *callback_context);

	/*
	 * The snapshot is invalid, note this in the metadata.
	 */
	void (*drop_snapshot) (struct exception_store *store);

	/*
	 * Return how full the snapshot is.
	 */
	void (*fraction_full) (struct exception_store *store,
			       sector_t *numerator,
			       sector_t *denominator);

	struct dm_snapshot *snap;
	void *context;
};
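
/*
 * A sketch of how these operations are typically driven, for orientation
 * only (the actual call sites live in the snapshot target code, not in this
 * header); e, cb, ctx, num and denom are placeholder names:
 *
 *	store->read_metadata(store);			load existing exceptions
 *	store->prepare_exception(store, e);		reserve space for a new one
 *	store->commit_exception(store, e, cb, ctx);	persist it, then cb() runs
 *	store->drop_snapshot(store);			mark the snapshot invalid
 *	store->fraction_full(store, &num, &denom);	report how full the COW is
 *	store->destroy(store);				tear the store down
 */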

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
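
/*
 * For example (illustrative value only): with a hash size of 16 the mask is
 * 15, so DM_TRACKED_CHUNK_HASH(0x12345) selects bucket 0x5 of the
 * tracked_chunk_hash[] array below.
 */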

struct dm_snapshot {
	struct rw_semaphore lock;
	struct dm_target *ti;

	struct dm_dev *origin;
	struct dm_dev *cow;

	/* List of snapshots per Origin */
	struct list_head list;

	/* Size of data blocks saved - must be a power of 2 */
	chunk_t chunk_size;
	chunk_t chunk_mask;
	chunk_t chunk_shift;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	/* Used for display of table */
	char type;

	/* The last percentage we notified */
	int last_percent;

	mempool_t *pending_pool;

	struct exception_table pending;
	struct exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access,
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct exception_store store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
};

/*
 * Used by the exception stores to load exceptions when
 * initialising.
 */
int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new);

/*
 * Constructors for the default persistent and transient
 * exception stores.
 */
int dm_create_persistent(struct exception_store *store);

int dm_create_transient(struct exception_store *store);
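
/*
 * Orientation sketch only, not mandated by this header: a snapshot target
 * that embeds a struct exception_store (as struct dm_snapshot does above)
 * would wire a store up roughly as
 *
 *	r = dm_create_persistent(&s->store);	(or dm_create_transient())
 *
 * and a persistent store's read_metadata() might then call
 * dm_add_exception(s, old, new) for each exception it finds on disk.
 * Here s, r, old and new are placeholder names.
 */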

/*
 * Return the number of sectors in the device.
 */
static inline sector_t get_dev_size(struct block_device *bdev)
{
	return bdev->bd_inode->i_size >> SECTOR_SHIFT;
}

static inline chunk_t sector_to_chunk(struct dm_snapshot *s, sector_t sector)
{
	return (sector & ~s->chunk_mask) >> s->chunk_shift;
}

static inline sector_t chunk_to_sector(struct dm_snapshot *s, chunk_t chunk)
{
	return chunk << s->chunk_shift;
}
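
/*
 * Worked example with illustrative values, assuming chunk_mask ==
 * chunk_size - 1 and chunk_shift == log2(chunk_size): for a chunk_size of
 * 16 sectors, chunk_mask is 15 and chunk_shift is 4, so
 *
 *	sector_to_chunk(s, 35)  ==  (35 & ~15) >> 4  ==  2
 *	chunk_to_sector(s, 2)   ==  2 << 4           ==  32
 *
 * i.e. a sector is rounded down to the start of the chunk containing it.
 */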

static inline int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

#endif