drbd: Rename drbd_tconn -> drbd_connection
sed -i -e 's:all_tconn:connections:g' -e 's:tconn:connection:g'

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
parent b30ab7913b
commit bde89a9e15

12 changed files with 1447 additions and 1447 deletions
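The rename is purely mechanical, driven by the sed command in the commit message. Below is a minimal sketch of how it could be reproduced and sanity-checked from a kernel tree; the drivers/block/drbd path and the verification commands are illustrative assumptions, not part of the commit. Note that the all_tconn rule must run before the catch-all tconn rule, since the second substitution alone would turn all_tconn into all_connection rather than connections.

# Sketch: reproduce the mechanical rename and check the result.
# Assumptions: a kernel tree is checked out and the DRBD sources
# live under drivers/block/drbd (not stated in the commit message).
cd drivers/block/drbd
sed -i -e 's:all_tconn:connections:g' -e 's:tconn:connection:g' *.c *.h
git grep -n 'tconn' .   # expect no output once the rename is complete
git diff --stat         # a pure rename shows equal additions and deletions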
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
@@ -315,7 +315,7 @@ void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate)
 {
 	bool locked = false;
 
-	BUG_ON(delegate && current == device->tconn->worker.task);
+	BUG_ON(delegate && current == device->connection->worker.task);
 
 	/* Serialize multiple transactions.
 	 * This uses test_and_set_bit, memory barrier is implicit.
@@ -354,7 +354,7 @@ void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate)
  */
 void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate)
 {
-	BUG_ON(delegate && current == device->tconn->worker.task);
+	BUG_ON(delegate && current == device->connection->worker.task);
 
 	if (drbd_al_begin_io_prepare(device, i))
 		drbd_al_begin_io_commit(device, delegate);
@@ -614,7 +614,7 @@ static int al_write_transaction(struct drbd_device *device, bool delegate)
 		init_completion(&al_work.event);
 		al_work.w.cb = w_al_write_transaction;
 		al_work.w.device = device;
-		drbd_queue_work_front(&device->tconn->sender_work, &al_work.w);
+		drbd_queue_work_front(&device->connection->sender_work, &al_work.w);
 		wait_for_completion(&al_work.event);
 		return al_work.err;
 	} else
@@ -796,7 +796,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t sector
 		udw->enr = ext->lce.lc_number;
 		udw->w.cb = w_update_odbm;
 		udw->w.device = device;
-		drbd_queue_work_front(&device->tconn->sender_work, &udw->w);
+		drbd_queue_work_front(&device->connection->sender_work, &udw->w);
 	} else {
 		dev_warn(DEV, "Could not kmalloc an udw\n");
 	}
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
@@ -119,9 +119,9 @@ static void __bm_print_lock_info(struct drbd_device *device, const char *func)
 	if (!__ratelimit(&drbd_ratelimit_state))
 		return;
 	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
-		drbd_task_to_thread_name(device->tconn, current),
+		drbd_task_to_thread_name(device->connection, current),
 		func, b->bm_why ?: "?",
-		drbd_task_to_thread_name(device->tconn, b->bm_task));
+		drbd_task_to_thread_name(device->connection, b->bm_task));
 }
 
 void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
@@ -138,9 +138,9 @@ void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
 
 	if (trylock_failed) {
 		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
-			 drbd_task_to_thread_name(device->tconn, current),
+			 drbd_task_to_thread_name(device->connection, current),
 			 why, b->bm_why ?: "?",
-			 drbd_task_to_thread_name(device->tconn, b->bm_task));
+			 drbd_task_to_thread_name(device->connection, b->bm_task));
 		mutex_lock(&b->bm_change);
 	}
 	if (BM_LOCKED_MASK & b->bm_flags)
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
@@ -98,7 +98,7 @@ extern char usermode_helper[];
 #define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
 
 struct drbd_device;
-struct drbd_tconn;
+struct drbd_connection;
 
 
 /* to shorten dev_warn(DEV, "msg"); and relatives statements */
@@ -167,7 +167,7 @@ drbd_insert_fault(struct drbd_device *device, unsigned int type) {
 
 extern struct ratelimit_state drbd_ratelimit_state;
 extern struct idr minors; /* RCU, updates: genl_lock() */
-extern struct list_head drbd_tconns; /* RCU, updates: genl_lock() */
+extern struct list_head drbd_connections; /* RCU, updates: genl_lock() */
 
 extern const char *cmdname(enum drbd_packet cmd);
 
@@ -211,7 +211,7 @@ static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
 #endif
 }
 
-extern unsigned int drbd_header_size(struct drbd_tconn *tconn);
+extern unsigned int drbd_header_size(struct drbd_connection *connection);
 
 /**********************************************************************/
 enum drbd_thread_state {
@@ -227,7 +227,7 @@ struct drbd_thread {
 	struct completion stop;
 	enum drbd_thread_state t_state;
 	int (*function) (struct drbd_thread *);
-	struct drbd_tconn *tconn;
+	struct drbd_connection *connection;
 	int reset_cpu_mask;
 	char name[9];
 };
@@ -247,7 +247,7 @@ struct drbd_work {
 	int (*cb)(struct drbd_work *, int cancel);
 	union {
 		struct drbd_device *device;
-		struct drbd_tconn *tconn;
+		struct drbd_connection *connection;
 	};
 };
 
@@ -289,7 +289,7 @@ struct drbd_request {
 };
 
 struct drbd_epoch {
-	struct drbd_tconn *tconn;
+	struct drbd_connection *connection;
 	struct list_head list;
 	unsigned int barrier_nr;
 	atomic_t epoch_size; /* increased on every request added. */
@@ -483,7 +483,7 @@ struct drbd_backing_dev {
 	struct block_device *backing_bdev;
 	struct block_device *md_bdev;
 	struct drbd_md md;
-	struct disk_conf *disk_conf; /* RCU, for updates: device->tconn->conf_update */
+	struct disk_conf *disk_conf; /* RCU, for updates: device->connection->conf_update */
 	sector_t known_size; /* last known size of that backing device */
 };
 
@@ -514,7 +514,7 @@ struct fifo_buffer {
 };
 extern struct fifo_buffer *fifo_alloc(int fifo_size);
 
-/* flag bits per tconn */
+/* flag bits per connection */
 enum {
 	NET_CONGESTED,		/* The data socket is congested */
 	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
@@ -536,11 +536,11 @@ enum {
 	DISCONNECT_SENT,
 };
 
-struct drbd_tconn {			/* is a resource from the config file */
+struct drbd_connection {		/* is a resource from the config file */
 	char *name;			/* Resource name */
-	struct list_head all_tconn;	/* linked on global drbd_tconns */
+	struct list_head connections;	/* linked on global drbd_connections */
 	struct kref kref;
-	struct idr volumes;		/* <tconn, vnr> to device mapping */
+	struct idr volumes;		/* <connection, vnr> to device mapping */
 	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
 	unsigned susp:1;		/* IO suspended by user */
 	unsigned susp_nod:1;		/* IO suspended because no data */
@@ -570,7 +570,7 @@ struct drbd_tconn { /* is a resource from the config file */
 	struct list_head transfer_log;	/* all requests not yet fully processed */
 
 	struct crypto_hash *cram_hmac_tfm;
-	struct crypto_hash *integrity_tfm;  /* checksums we compute, updates protected by tconn->data->mutex */
+	struct crypto_hash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
 	struct crypto_hash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread */
 	struct crypto_hash *csums_tfm;
 	struct crypto_hash *verify_tfm;
@@ -618,7 +618,7 @@ struct submit_worker {
 };
 
 struct drbd_device {
-	struct drbd_tconn *tconn;
+	struct drbd_connection *connection;
 	int vnr;		/* volume number within the connection */
 	struct kref kref;
 
@@ -744,7 +744,7 @@ struct drbd_device {
 	struct bm_io_work bm_io_work;
 	u64 ed_uuid; /* UUID of the exposed data */
 	struct mutex own_state_mutex;
-	struct mutex *state_mutex; /* either own_state_mutex or device->tconn->cstate_mutex */
+	struct mutex *state_mutex; /* either own_state_mutex or device->connection->cstate_mutex */
 	char congestion_reason;  /* Why we where congested... */
 	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
 	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
@@ -752,7 +752,7 @@ struct drbd_device {
 	int rs_last_events;  /* counter of read or write "events" (unit sectors)
 			      * on the lower level device when we last looked. */
 	int c_sync_rate; /* current resync rate after syncer throttle magic */
-	struct fifo_buffer *rs_plan_s; /* correction values of resync planer (RCU, tconn->conn_update) */
+	struct fifo_buffer *rs_plan_s; /* correction values of resync planer (RCU, connection->conn_update) */
 	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
 	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
 	unsigned int peer_max_bio_size;
@@ -773,9 +773,9 @@ static inline unsigned int device_to_minor(struct drbd_device *device)
 	return device->minor;
 }
 
-static inline struct drbd_device *vnr_to_device(struct drbd_tconn *tconn, int vnr)
+static inline struct drbd_device *vnr_to_device(struct drbd_connection *connection, int vnr)
 {
-	return (struct drbd_device *)idr_find(&tconn->volumes, vnr);
+	return (struct drbd_device *)idr_find(&connection->volumes, vnr);
 }
 
 /*
@@ -792,25 +792,25 @@ enum dds_flags {
 extern void drbd_init_set_defaults(struct drbd_device *device);
 extern int drbd_thread_start(struct drbd_thread *thi);
 extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
-extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task);
+extern char *drbd_task_to_thread_name(struct drbd_connection *connection, struct task_struct *task);
 #ifdef CONFIG_SMP
 extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
-extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn);
+extern void drbd_calc_cpu_mask(struct drbd_connection *connection);
 #else
 #define drbd_thread_current_set_cpu(A) ({})
 #define drbd_calc_cpu_mask(A) ({})
 #endif
-extern void tl_release(struct drbd_tconn *, unsigned int barrier_nr,
+extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
 		       unsigned int set_size);
-extern void tl_clear(struct drbd_tconn *);
-extern void drbd_free_sock(struct drbd_tconn *tconn);
-extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
+extern void tl_clear(struct drbd_connection *);
+extern void drbd_free_sock(struct drbd_connection *connection);
+extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
 		     void *buf, size_t size, unsigned msg_flags);
-extern int drbd_send_all(struct drbd_tconn *, struct socket *, void *, size_t,
+extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
 			 unsigned);
 
-extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd);
-extern int drbd_send_protocol(struct drbd_tconn *tconn);
+extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
+extern int drbd_send_protocol(struct drbd_connection *connection);
 extern int drbd_send_uuids(struct drbd_device *device);
 extern int drbd_send_uuids_skip_initial_sync(struct drbd_device *device);
 extern void drbd_gen_and_send_sync_uuid(struct drbd_device *device);
@@ -818,7 +818,7 @@ extern int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum d
 extern int drbd_send_state(struct drbd_device *device, union drbd_state s);
 extern int drbd_send_current_state(struct drbd_device *device);
 extern int drbd_send_sync_param(struct drbd_device *device);
-extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr,
+extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
 			    u32 set_size);
 extern int drbd_send_ack(struct drbd_device *, enum drbd_packet,
 			 struct drbd_peer_request *);
@@ -841,12 +841,12 @@ extern int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int
 
 extern int drbd_send_bitmap(struct drbd_device *device);
 extern void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode);
-extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode);
+extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
 extern void drbd_free_bc(struct drbd_backing_dev *ldev);
 extern void drbd_device_cleanup(struct drbd_device *device);
 void drbd_print_uuids(struct drbd_device *device, const char *text);
 
-extern void conn_md_sync(struct drbd_tconn *tconn);
+extern void conn_md_sync(struct drbd_connection *connection);
 extern void drbd_md_write(struct drbd_device *device, void *buffer);
 extern void drbd_md_sync(struct drbd_device *device);
 extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
@@ -1153,17 +1153,17 @@ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
 
 extern rwlock_t global_state_lock;
 
-extern int conn_lowest_minor(struct drbd_tconn *tconn);
-enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr);
+extern int conn_lowest_minor(struct drbd_connection *connection);
+enum drbd_ret_code conn_new_minor(struct drbd_connection *connection, unsigned int minor, int vnr);
 extern void drbd_minor_destroy(struct kref *kref);
 
-extern int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts);
-extern struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts);
+extern int set_resource_options(struct drbd_connection *connection, struct res_opts *res_opts);
+extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
 extern void conn_destroy(struct kref *kref);
-struct drbd_tconn *conn_get_by_name(const char *name);
-extern struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+struct drbd_connection *conn_get_by_name(const char *name);
+extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
 					    void *peer_addr, int peer_addr_len);
-extern void conn_free_crypto(struct drbd_tconn *tconn);
+extern void conn_free_crypto(struct drbd_connection *connection);
 
 extern int proc_details;
 
@@ -1198,8 +1198,8 @@ extern void drbd_reconsider_max_bio_size(struct drbd_device *device);
 extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
 					enum drbd_role new_role,
 					int force);
-extern bool conn_try_outdate_peer(struct drbd_tconn *tconn);
-extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn);
+extern bool conn_try_outdate_peer(struct drbd_connection *connection);
+extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
 extern int drbd_khelper(struct drbd_device *device, char *cmd);
 
 /* drbd_worker.c */
@@ -1271,11 +1271,11 @@ extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request
 extern struct page *drbd_alloc_pages(struct drbd_device *, unsigned int, bool);
 extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
 extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
-extern void conn_flush_workqueue(struct drbd_tconn *tconn);
+extern void conn_flush_workqueue(struct drbd_connection *connection);
 extern int drbd_connected(struct drbd_device *device);
 static inline void drbd_flush_workqueue(struct drbd_device *device)
 {
-	conn_flush_workqueue(device->tconn);
+	conn_flush_workqueue(device->connection);
 }
 
 /* Yes, there is kernel_setsockopt, but only since 2.6.18.
@@ -1327,7 +1327,7 @@ static inline void drbd_tcp_quickack(struct socket *sock)
 			(char*)&val, sizeof(val));
 }
 
-void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo);
+void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo);
 
 /* drbd_proc.c */
 extern struct proc_dir_entry *drbd_proc;
@@ -1421,9 +1421,9 @@ static inline union drbd_state drbd_read_state(struct drbd_device *device)
 	union drbd_state rv;
 
 	rv.i = device->state.i;
-	rv.susp = device->tconn->susp;
-	rv.susp_nod = device->tconn->susp_nod;
-	rv.susp_fen = device->tconn->susp_fen;
+	rv.susp = device->connection->susp;
+	rv.susp_nod = device->connection->susp_nod;
+	rv.susp_fen = device->connection->susp_fen;
 
 	return rv;
 }
@@ -1505,9 +1505,9 @@ static inline void drbd_chk_io_error_(struct drbd_device *device,
 {
 	if (error) {
 		unsigned long flags;
-		spin_lock_irqsave(&device->tconn->req_lock, flags);
+		spin_lock_irqsave(&device->connection->req_lock, flags);
 		__drbd_chk_io_error_(device, forcedetach, where);
-		spin_unlock_irqrestore(&device->tconn->req_lock, flags);
+		spin_unlock_irqrestore(&device->connection->req_lock, flags);
 	}
 }
 
@@ -1630,31 +1630,31 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
 	wake_up(&q->q_wait);
 }
 
-static inline void wake_asender(struct drbd_tconn *tconn)
+static inline void wake_asender(struct drbd_connection *connection)
 {
-	if (test_bit(SIGNAL_ASENDER, &tconn->flags))
-		force_sig(DRBD_SIG, tconn->asender.task);
+	if (test_bit(SIGNAL_ASENDER, &connection->flags))
+		force_sig(DRBD_SIG, connection->asender.task);
 }
 
-static inline void request_ping(struct drbd_tconn *tconn)
+static inline void request_ping(struct drbd_connection *connection)
 {
-	set_bit(SEND_PING, &tconn->flags);
-	wake_asender(tconn);
+	set_bit(SEND_PING, &connection->flags);
+	wake_asender(connection);
 }
 
-extern void *conn_prepare_command(struct drbd_tconn *, struct drbd_socket *);
+extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
 extern void *drbd_prepare_command(struct drbd_device *, struct drbd_socket *);
-extern int conn_send_command(struct drbd_tconn *, struct drbd_socket *,
+extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
 			     enum drbd_packet, unsigned int, void *,
 			     unsigned int);
 extern int drbd_send_command(struct drbd_device *, struct drbd_socket *,
 			     enum drbd_packet, unsigned int, void *,
 			     unsigned int);
 
-extern int drbd_send_ping(struct drbd_tconn *tconn);
-extern int drbd_send_ping_ack(struct drbd_tconn *tconn);
+extern int drbd_send_ping(struct drbd_connection *connection);
+extern int drbd_send_ping_ack(struct drbd_connection *connection);
 extern int drbd_send_state_req(struct drbd_device *, union drbd_state, union drbd_state);
-extern int conn_send_state_req(struct drbd_tconn *, union drbd_state, union drbd_state);
+extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
 
 static inline void drbd_thread_stop(struct drbd_thread *thi)
 {
@@ -1783,7 +1783,7 @@ static inline void put_ldev(struct drbd_device *device)
 	if (device->state.disk == D_FAILED) {
 		/* all application IO references gone. */
 		if (!test_and_set_bit(GO_DISKLESS, &device->flags))
-			drbd_queue_work(&device->tconn->sender_work, &device->go_diskless);
+			drbd_queue_work(&device->connection->sender_work, &device->go_diskless);
 	}
 	wake_up(&device->misc_wait);
 }
@@ -1865,7 +1865,7 @@ static inline int drbd_get_max_buffers(struct drbd_device *device)
 	int mxb;
 
 	rcu_read_lock();
-	nc = rcu_dereference(device->tconn->net_conf);
+	nc = rcu_dereference(device->connection->net_conf);
 	mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */
 	rcu_read_unlock();
 
@@ -1908,7 +1908,7 @@ static inline int drbd_state_is_stable(struct drbd_device *device)
 
 	/* Allow IO in BM exchange states with new protocols */
 	case C_WF_BITMAP_S:
-		if (device->tconn->agreed_pro_version < 96)
+		if (device->connection->agreed_pro_version < 96)
 			return 0;
 		break;
 
@@ -1944,9 +1944,9 @@ static inline int drbd_state_is_stable(struct drbd_device *device)
 
 static inline int drbd_suspended(struct drbd_device *device)
 {
-	struct drbd_tconn *tconn = device->tconn;
+	struct drbd_connection *connection = device->connection;
 
-	return tconn->susp || tconn->susp_fen || tconn->susp_nod;
+	return connection->susp || connection->susp_fen || connection->susp_nod;
 }
 
 static inline bool may_inc_ap_bio(struct drbd_device *device)
@@ -1979,11 +1979,11 @@ static inline bool inc_ap_bio_cond(struct drbd_device *device)
 {
 	bool rv = false;
 
-	spin_lock_irq(&device->tconn->req_lock);
+	spin_lock_irq(&device->connection->req_lock);
 	rv = may_inc_ap_bio(device);
 	if (rv)
 		atomic_inc(&device->ap_bio_cnt);
-	spin_unlock_irq(&device->tconn->req_lock);
+	spin_unlock_irq(&device->connection->req_lock);
 
 	return rv;
 }
@@ -2010,7 +2010,7 @@ static inline void dec_ap_bio(struct drbd_device *device)
 
 	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
-			drbd_queue_work(&device->tconn->sender_work, &device->bm_io_work.w);
+			drbd_queue_work(&device->connection->sender_work, &device->bm_io_work.w);
 	}
 
 	/* this currently does wake_up for every dec_ap_bio!
@@ -2022,8 +2022,8 @@ static inline void dec_ap_bio(struct drbd_device *device)
 
 static inline bool verify_can_do_stop_sector(struct drbd_device *device)
 {
-	return device->tconn->agreed_pro_version >= 97 &&
-		device->tconn->agreed_pro_version != 100;
+	return device->connection->agreed_pro_version >= 97 &&
+		device->connection->agreed_pro_version != 100;
 }
 
 static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
(File diff suppressed because it is too large.)
(File diff suppressed because it is too large.)
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
@@ -251,7 +251,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 		/* reset device->congestion_reason */
 		bdi_rw_congested(&device->rq_queue->backing_dev_info);
 
-		nc = rcu_dereference(device->tconn->net_conf);
+		nc = rcu_dereference(device->connection->net_conf);
 		wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
 		seq_printf(seq,
 		   "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
@@ -280,8 +280,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 		   atomic_read(&device->rs_pending_cnt),
 		   atomic_read(&device->unacked_cnt),
 		   atomic_read(&device->ap_bio_cnt),
-		   device->tconn->epochs,
-		   write_ordering_chars[device->tconn->write_ordering]
+		   device->connection->epochs,
+		   write_ordering_chars[device->connection->write_ordering]
 		);
 		seq_printf(seq, " oos:%llu\n",
 			   Bit2KB((unsigned long long)
(File diff suppressed because it is too large.)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
@@ -163,20 +163,21 @@ void drbd_req_destroy(struct kref *kref)
 	mempool_free(req, drbd_request_mempool);
 }
 
-static void wake_all_senders(struct drbd_tconn *tconn) {
-	wake_up(&tconn->sender_work.q_wait);
+static void wake_all_senders(struct drbd_connection *connection)
+{
+	wake_up(&connection->sender_work.q_wait);
 }
 
 /* must hold resource->req_lock */
-void start_new_tl_epoch(struct drbd_tconn *tconn)
+void start_new_tl_epoch(struct drbd_connection *connection)
 {
 	/* no point closing an epoch, if it is empty, anyways. */
-	if (tconn->current_tle_writes == 0)
+	if (connection->current_tle_writes == 0)
 		return;
 
-	tconn->current_tle_writes = 0;
-	atomic_inc(&tconn->current_tle_nr);
-	wake_all_senders(tconn);
+	connection->current_tle_writes = 0;
+	atomic_inc(&connection->current_tle_nr);
+	wake_all_senders(connection);
 }
 
 void complete_master_bio(struct drbd_device *device,
@@ -273,8 +274,8 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 	 * and reset the transfer log epoch write_cnt.
 	 */
 	if (rw == WRITE &&
-	    req->epoch == atomic_read(&device->tconn->current_tle_nr))
-		start_new_tl_epoch(device->tconn);
+	    req->epoch == atomic_read(&device->connection->current_tle_nr))
+		start_new_tl_epoch(device->connection);
 
 	/* Update disk stats */
 	_drbd_end_io_acct(device, req);
@@ -476,7 +477,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		 * and from w_read_retry_remote */
 		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
 		rcu_read_lock();
-		nc = rcu_dereference(device->tconn->net_conf);
+		nc = rcu_dereference(device->connection->net_conf);
 		p = nc->wire_protocol;
 		rcu_read_unlock();
 		req->rq_state |=
@@ -541,7 +542,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
 		req->w.cb = w_send_read_req;
-		drbd_queue_work(&device->tconn->sender_work, &req->w);
+		drbd_queue_work(&device->connection->sender_work, &req->w);
 		break;
 
 	case QUEUE_FOR_NET_WRITE:
@@ -576,22 +577,22 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
 		req->w.cb = w_send_dblock;
-		drbd_queue_work(&device->tconn->sender_work, &req->w);
+		drbd_queue_work(&device->connection->sender_work, &req->w);
 
 		/* close the epoch, in case it outgrew the limit */
 		rcu_read_lock();
-		nc = rcu_dereference(device->tconn->net_conf);
+		nc = rcu_dereference(device->connection->net_conf);
 		p = nc->max_epoch_size;
 		rcu_read_unlock();
-		if (device->tconn->current_tle_writes >= p)
-			start_new_tl_epoch(device->tconn);
+		if (device->connection->current_tle_writes >= p)
+			start_new_tl_epoch(device->connection);
 
 		break;
 
 	case QUEUE_FOR_SEND_OOS:
 		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
 		req->w.cb =  w_send_out_of_sync;
-		drbd_queue_work(&device->tconn->sender_work, &req->w);
+		drbd_queue_work(&device->connection->sender_work, &req->w);
 		break;
 
 	case READ_RETRY_REMOTE_CANCELED:
@@ -703,7 +704,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		get_ldev(device); /* always succeeds in this call path */
 		req->w.cb = w_restart_disk_io;
-		drbd_queue_work(&device->tconn->sender_work, &req->w);
+		drbd_queue_work(&device->connection->sender_work, &req->w);
 		break;
 
 	case RESEND:
@@ -724,7 +725,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
 		if (req->w.cb) {
-			drbd_queue_work(&device->tconn->sender_work, &req->w);
+			drbd_queue_work(&device->connection->sender_work, &req->w);
 			rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
 		} /* else: FIXME can this happen? */
 		break;
@@ -756,7 +757,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		break;
 
 	case QUEUE_AS_DRBD_BARRIER:
-		start_new_tl_epoch(device->tconn);
+		start_new_tl_epoch(device->connection);
 		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
 		break;
 	};
@@ -850,9 +851,9 @@ static void complete_conflicting_writes(struct drbd_request *req)
 			break;
 		/* Indicate to wake up device->misc_wait on progress.  */
 		i->waiting = true;
-		spin_unlock_irq(&device->tconn->req_lock);
+		spin_unlock_irq(&device->connection->req_lock);
 		schedule();
-		spin_lock_irq(&device->tconn->req_lock);
+		spin_lock_irq(&device->connection->req_lock);
 	}
 	finish_wait(&device->misc_wait, &wait);
 }
@@ -860,17 +861,17 @@ static void complete_conflicting_writes(struct drbd_request *req)
 /* called within req_lock and rcu_read_lock() */
 static void maybe_pull_ahead(struct drbd_device *device)
 {
-	struct drbd_tconn *tconn = device->tconn;
+	struct drbd_connection *connection = device->connection;
 	struct net_conf *nc;
 	bool congested = false;
 	enum drbd_on_congestion on_congestion;
 
 	rcu_read_lock();
-	nc = rcu_dereference(tconn->net_conf);
+	nc = rcu_dereference(connection->net_conf);
 	on_congestion = nc ? nc->on_congestion : OC_BLOCK;
 	rcu_read_unlock();
 	if (on_congestion == OC_BLOCK ||
-	    tconn->agreed_pro_version < 96)
+	    connection->agreed_pro_version < 96)
 		return;
 
 	/* If I don't even have good local storage, we can not reasonably try
@@ -893,7 +894,7 @@ static void maybe_pull_ahead(struct drbd_device *device)
 
 	if (congested) {
 		/* start a new epoch for non-mirrored writes */
-		start_new_tl_epoch(device->tconn);
+		start_new_tl_epoch(device->connection);
 
 		if (on_congestion == OC_PULL_AHEAD)
 			_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
@@ -1077,7 +1078,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
 	struct bio_and_error m = { NULL, };
 	bool no_remote = false;
 
-	spin_lock_irq(&device->tconn->req_lock);
+	spin_lock_irq(&device->connection->req_lock);
 	if (rw == WRITE) {
 		/* This may temporarily give up the req_lock,
 		 * but will re-aquire it before it returns here.
@@ -1111,15 +1112,15 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
 	}
 
 	/* which transfer log epoch does this belong to? */
-	req->epoch = atomic_read(&device->tconn->current_tle_nr);
+	req->epoch = atomic_read(&device->connection->current_tle_nr);
 
 	/* no point in adding empty flushes to the transfer log,
 	 * they are mapped to drbd barriers already. */
 	if (likely(req->i.size!=0)) {
 		if (rw == WRITE)
-			device->tconn->current_tle_writes++;
+			device->connection->current_tle_writes++;
 
-		list_add_tail(&req->tl_requests, &device->tconn->transfer_log);
+		list_add_tail(&req->tl_requests, &device->connection->transfer_log);
 	}
 
 	if (rw == WRITE) {
@@ -1139,9 +1140,9 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
 		/* needs to be marked within the same spinlock */
 		_req_mod(req, TO_BE_SUBMITTED);
 		/* but we need to give up the spinlock to submit */
-		spin_unlock_irq(&device->tconn->req_lock);
+		spin_unlock_irq(&device->connection->req_lock);
 		drbd_submit_req_private_bio(req);
-		spin_lock_irq(&device->tconn->req_lock);
+		spin_lock_irq(&device->connection->req_lock);
 	} else if (no_remote) {
 nodata:
 		if (__ratelimit(&drbd_ratelimit_state))
@@ -1154,7 +1155,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
 out:
 	if (drbd_req_put_completion_ref(req, &m, 1))
 		kref_put(&req->kref, drbd_req_destroy);
-	spin_unlock_irq(&device->tconn->req_lock);
+	spin_unlock_irq(&device->connection->req_lock);
 
 	if (m.bio)
 		complete_master_bio(device, &m);
@@ -1320,12 +1321,12 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
 	return limit;
 }
 
-static struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
+static struct drbd_request *find_oldest_request(struct drbd_connection *connection)
 {
 	/* Walk the transfer log,
 	 * and find the oldest not yet completed request */
 	struct drbd_request *r;
-	list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
 		if (atomic_read(&r->completion_ref))
 			return r;
 	}
@@ -1335,14 +1336,14 @@ static struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
 void request_timer_fn(unsigned long data)
 {
 	struct drbd_device *device = (struct drbd_device *) data;
-	struct drbd_tconn *tconn = device->tconn;
+	struct drbd_connection *connection = device->connection;
 	struct drbd_request *req; /* oldest request */
 	struct net_conf *nc;
 	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
 	unsigned long now;
 
 	rcu_read_lock();
-	nc = rcu_dereference(tconn->net_conf);
+	nc = rcu_dereference(connection->net_conf);
 	if (nc && device->state.conn >= C_WF_REPORT_PARAMS)
 		ent = nc->timeout * HZ/10 * nc->ko_count;
 
@@ -1359,10 +1360,10 @@ void request_timer_fn(unsigned long data)
 
 	now = jiffies;
 
-	spin_lock_irq(&tconn->req_lock);
-	req = find_oldest_request(tconn);
+	spin_lock_irq(&connection->req_lock);
+	req = find_oldest_request(connection);
 	if (!req) {
-		spin_unlock_irq(&tconn->req_lock);
+		spin_unlock_irq(&connection->req_lock);
 		mod_timer(&device->request_timer, now + et);
 		return;
 	}
@@ -1385,7 +1386,7 @@ void request_timer_fn(unsigned long data)
 	 */
 	if (ent && req->rq_state & RQ_NET_PENDING &&
 	    time_after(now, req->start_time + ent) &&
-	    !time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) {
+	    !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
 		dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
 		_drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
 	}
@@ -1396,6 +1397,6 @@ void request_timer_fn(unsigned long data)
 		 __drbd_chk_io_error(device, DRBD_FORCE_DETACH);
 	}
 	nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
-	spin_unlock_irq(&tconn->req_lock);
+	spin_unlock_irq(&connection->req_lock);
 	mod_timer(&device->request_timer, nt);
 }
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
@@ -275,7 +275,7 @@ struct bio_and_error {
 	int error;
 };
 
-extern void start_new_tl_epoch(struct drbd_tconn *tconn);
+extern void start_new_tl_epoch(struct drbd_connection *connection);
 extern void drbd_req_destroy(struct kref *kref);
 extern void _req_may_be_done(struct drbd_request *req,
 		struct bio_and_error *m);
@@ -284,8 +284,8 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 extern void complete_master_bio(struct drbd_device *device,
 		struct bio_and_error *m);
 extern void request_timer_fn(unsigned long data);
-extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
-extern void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
+extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
+extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
 
 /* this is in drbd_main.c */
 extern void drbd_restart_request(struct drbd_request *req);
@@ -318,9 +318,9 @@ static inline int req_mod(struct drbd_request *req,
 	struct bio_and_error m;
 	int rv;
 
-	spin_lock_irqsave(&device->tconn->req_lock, flags);
+	spin_lock_irqsave(&device->connection->req_lock, flags);
 	rv = __req_mod(req, what, &m);
-	spin_unlock_irqrestore(&device->tconn->req_lock, flags);
+	spin_unlock_irqrestore(&device->connection->req_lock, flags);
 
 	if (m.bio)
 		complete_master_bio(device, &m);
@ -51,7 +51,7 @@ static int w_after_state_ch(struct drbd_work *w, int unused);
|
|||
static void after_state_ch(struct drbd_device *device, union drbd_state os,
|
||||
union drbd_state ns, enum chg_state_flags flags);
|
||||
static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
|
||||
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_tconn *);
|
||||
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *);
|
||||
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
|
||||
static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns,
|
||||
enum sanitize_state_warnings *warn);
|
||||
|
@ -61,14 +61,14 @@ static inline bool is_susp(union drbd_state s)
|
|||
return s.susp || s.susp_nod || s.susp_fen;
|
||||
}
|
||||
|
||||
bool conn_all_vols_unconf(struct drbd_tconn *tconn)
|
||||
bool conn_all_vols_unconf(struct drbd_connection *connection)
|
||||
{
|
||||
struct drbd_device *device;
|
||||
bool rv = true;
|
||||
int vnr;
|
||||
|
||||
rcu_read_lock();
|
||||
idr_for_each_entry(&tconn->volumes, device, vnr) {
|
||||
idr_for_each_entry(&connection->volumes, device, vnr) {
|
||||
if (device->state.disk != D_DISKLESS ||
|
||||
device->state.conn != C_STANDALONE ||
|
||||
device->state.role != R_SECONDARY) {
|
||||
|
@ -100,98 +100,98 @@ static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
|
|||
return R_PRIMARY;
|
||||
}
|
||||
|
||||
enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
|
||||
enum drbd_role conn_highest_role(struct drbd_connection *connection)
|
||||
{
|
||||
enum drbd_role role = R_UNKNOWN;
|
||||
struct drbd_device *device;
|
||||
int vnr;
|
||||
|
||||
rcu_read_lock();
|
||||
idr_for_each_entry(&tconn->volumes, device, vnr)
|
||||
idr_for_each_entry(&connection->volumes, device, vnr)
|
||||
role = max_role(role, device->state.role);
|
||||
rcu_read_unlock();
|
||||
|
||||
return role;
|
||||
}
|
||||
|
||||
enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
|
||||
enum drbd_role conn_highest_peer(struct drbd_connection *connection)
|
||||
{
|
||||
enum drbd_role peer = R_UNKNOWN;
|
||||
struct drbd_device *device;
|
||||
int vnr;
|
||||
|
||||
rcu_read_lock();
|
||||
idr_for_each_entry(&tconn->volumes, device, vnr)
|
||||
idr_for_each_entry(&connection->volumes, device, vnr)
|
||||
peer = max_role(peer, device->state.peer);
|
||||
rcu_read_unlock();
|
||||
|
||||
return peer;
|
||||
}
|
||||
|
||||
enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
|
||||
enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection)
|
||||
{
|
||||
enum drbd_disk_state ds = D_DISKLESS;
|
||||
struct drbd_device *device;
|
||||
int vnr;
|
||||
|
||||
rcu_read_lock();
|
||||
idr_for_each_entry(&tconn->volumes, device, vnr)
|
||||
idr_for_each_entry(&connection->volumes, device, vnr)
|
||||
ds = max_t(enum drbd_disk_state, ds, device->state.disk);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ds;
|
||||
}
|
||||
|
||||
enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn)
|
||||
enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection)
|
||||
{
|
||||
enum drbd_disk_state ds = D_MASK;
|
||||
struct drbd_device *device;
|
||||
int vnr;
|
||||
|
||||
rcu_read_lock();
|
||||
idr_for_each_entry(&tconn->volumes, device, vnr)
|
||||
idr_for_each_entry(&connection->volumes, device, vnr)
|
||||
ds = min_t(enum drbd_disk_state, ds, device->state.disk);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ds;
|
||||
}
|
||||
|
||||
enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
|
||||
enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection)
|
||||
{
|
||||
enum drbd_disk_state ds = D_DISKLESS;
|
||||
struct drbd_device *device;
|
||||
int vnr;
|
||||
|
||||
rcu_read_lock();
|
||||
idr_for_each_entry(&tconn->volumes, device, vnr)
|
||||
idr_for_each_entry(&connection->volumes, device, vnr)
|
||||
ds = max_t(enum drbd_disk_state, ds, device->state.pdsk);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ds;
|
||||
}
|
||||
|
||||
enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn)
|
||||
enum drbd_conns conn_lowest_conn(struct drbd_connection *connection)
|
||||
{
|
||||
enum drbd_conns conn = C_MASK;
|
||||
struct drbd_device *device;
|
||||
int vnr;
|
||||
|
||||
rcu_read_lock();
|
||||
idr_for_each_entry(&tconn->volumes, device, vnr)
|
||||
idr_for_each_entry(&connection->volumes, device, vnr)
|
||||
conn = min_t(enum drbd_conns, conn, device->state.conn);
|
||||
rcu_read_unlock();
|
||||
|
||||
return conn;
|
||||
}
|
||||
|
||||
static bool no_peer_wf_report_params(struct drbd_tconn *tconn)
|
||||
static bool no_peer_wf_report_params(struct drbd_connection *connection)
|
||||
{
|
||||
struct drbd_device *device;
|
||||
int vnr;
|
||||
bool rv = true;
|
||||
|
||||
rcu_read_lock();
|
||||
idr_for_each_entry(&tconn->volumes, device, vnr)
|
||||
idr_for_each_entry(&connection->volumes, device, vnr)
|
||||
if (device->state.conn == C_WF_REPORT_PARAMS) {
|
||||
rv = false;
|
||||
break;
|
||||
|
@ -237,10 +237,10 @@ drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
|
|||
union drbd_state ns;
|
||||
enum drbd_state_rv rv;
|
||||
|
||||
spin_lock_irqsave(&device->tconn->req_lock, flags);
|
||||
spin_lock_irqsave(&device->connection->req_lock, flags);
|
||||
ns = apply_mask_val(drbd_read_state(device), mask, val);
|
||||
rv = _drbd_set_state(device, ns, f, NULL);
|
||||
spin_unlock_irqrestore(&device->tconn->req_lock, flags);
|
||||
spin_unlock_irqrestore(&device->connection->req_lock, flags);
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
@ -271,7 +271,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
|
|||
if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
|
||||
return SS_CW_FAILED_BY_PEER;
|
||||
|
||||
spin_lock_irqsave(&device->tconn->req_lock, flags);
|
||||
spin_lock_irqsave(&device->connection->req_lock, flags);
|
||||
os = drbd_read_state(device);
|
||||
ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
|
||||
rv = is_valid_transition(os, ns);
|
||||
|
@ -283,12 +283,12 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
|
|||
if (rv == SS_UNKNOWN_ERROR) {
|
||||
rv = is_valid_state(device, ns);
|
||||
if (rv >= SS_SUCCESS) {
|
||||
rv = is_valid_soft_transition(os, ns, device->tconn);
|
||||
rv = is_valid_soft_transition(os, ns, device->connection);
|
||||
if (rv >= SS_SUCCESS)
|
||||
rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&device->tconn->req_lock, flags);
|
||||
spin_unlock_irqrestore(&device->connection->req_lock, flags);
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
@ -317,20 +317,20 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
|
|||
if (f & CS_SERIALIZE)
|
||||
mutex_lock(device->state_mutex);
|
||||
|
||||
spin_lock_irqsave(&device->tconn->req_lock, flags);
|
||||
spin_lock_irqsave(&device->connection->req_lock, flags);
|
||||
os = drbd_read_state(device);
|
||||
ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
|
||||
rv = is_valid_transition(os, ns);
|
||||
if (rv < SS_SUCCESS) {
|
||||
spin_unlock_irqrestore(&device->tconn->req_lock, flags);
|
||||
spin_unlock_irqrestore(&device->connection->req_lock, flags);
|
||||
goto abort;
|
||||
}
|
||||
|
||||
if (cl_wide_st_chg(device, os, ns)) {
|
||||
rv = is_valid_state(device, ns);
|
||||
if (rv == SS_SUCCESS)
|
||||
rv = is_valid_soft_transition(os, ns, device->tconn);
|
||||
spin_unlock_irqrestore(&device->tconn->req_lock, flags);
|
||||
rv = is_valid_soft_transition(os, ns, device->connection);
|
||||
spin_unlock_irqrestore(&device->connection->req_lock, flags);
|
||||
|
||||
if (rv < SS_SUCCESS) {
|
||||
if (f & CS_VERBOSE)
|
||||
|
@ -353,17 +353,17 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
|
|||
print_st_err(device, os, ns, rv);
|
||||
goto abort;
|
||||
}
|
||||
spin_lock_irqsave(&device->tconn->req_lock, flags);
|
||||
spin_lock_irqsave(&device->connection->req_lock, flags);
|
||||
ns = apply_mask_val(drbd_read_state(device), mask, val);
|
||||
rv = _drbd_set_state(device, ns, f, &done);
|
||||
} else {
|
||||
rv = _drbd_set_state(device, ns, f, &done);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&device->tconn->req_lock, flags);
|
||||
spin_unlock_irqrestore(&device->connection->req_lock, flags);
|
||||
|
||||
if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
|
||||
D_ASSERT(current != device->tconn->worker.task);
|
||||
D_ASSERT(current != device->connection->worker.task);
|
||||
wait_for_completion(&done);
|
||||
}
|
||||
|
||||
|
@ -480,7 +480,7 @@ static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os
|
|||
dev_info(DEV, "%s\n", pb);
|
||||
}
|
||||
|
||||
static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns,
|
||||
static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns,
|
||||
enum chg_state_flags flags)
|
||||
{
|
||||
char pb[300];
|
||||
|
@ -494,7 +494,7 @@ static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os,
|
|||
is_susp(ns));
|
||||
|
||||
if (pbp != pb)
|
||||
conn_info(tconn, "%s\n", pb);
|
||||
conn_info(connection, "%s\n", pb);
|
||||
}
|
||||
|
||||
|
||||
|
@ -519,12 +519,12 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
|
|||
put_ldev(device);
|
||||
}
|
||||
|
||||
nc = rcu_dereference(device->tconn->net_conf);
|
||||
nc = rcu_dereference(device->connection->net_conf);
|
||||
if (nc) {
|
||||
if (!nc->two_primaries && ns.role == R_PRIMARY) {
|
||||
if (ns.peer == R_PRIMARY)
|
||||
rv = SS_TWO_PRIMARIES;
|
||||
else if (conn_highest_peer(device->tconn) == R_PRIMARY)
|
||||
else if (conn_highest_peer(device->connection) == R_PRIMARY)
|
||||
rv = SS_O_VOL_PEER_PRI;
|
||||
}
|
||||
}
|
||||
|
@ -565,7 +565,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
|
|||
rv = SS_NO_VERIFY_ALG;
|
||||
|
||||
else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
|
||||
device->tconn->agreed_pro_version < 88)
|
||||
device->connection->agreed_pro_version < 88)
|
||||
rv = SS_NOT_SUPPORTED;
|
||||
|
||||
else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
|
||||
|
@ -592,7 +592,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
|
|||
* @os: old state.
|
||||
*/
|
||||
static enum drbd_state_rv
|
||||
is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_tconn *tconn)
|
||||
is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_connection *connection)
|
||||
{
|
||||
enum drbd_state_rv rv = SS_SUCCESS;
|
||||
|
||||
|
@ -620,7 +620,7 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_t
|
|||
|
||||
/* While establishing a connection only allow cstate to change.
|
||||
Delay/refuse role changes, detach attach etc... */
|
||||
if (test_bit(STATE_SENT, &tconn->flags) &&
|
||||
if (test_bit(STATE_SENT, &connection->flags) &&
|
||||
!(os.conn == C_WF_REPORT_PARAMS ||
|
||||
(ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
|
||||
rv = SS_IN_TRANSIENT_STATE;
|
||||
|
@ -871,7 +871,7 @@ static union drbd_state sanitize_state(struct drbd_device *device, union drbd_st
|
|||
(ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
|
||||
ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
|
||||
|
||||
if (device->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
|
||||
if (device->connection->res_opts.on_no_data == OND_SUSPEND_IO &&
|
||||
(ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
|
||||
ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
|
||||
|
||||
|
@ -899,7 +899,7 @@ void drbd_resume_al(struct drbd_device *device)
|
|||
/* helper for __drbd_set_state */
|
||||
static void set_ov_position(struct drbd_device *device, enum drbd_conns cs)
|
||||
{
|
||||
if (device->tconn->agreed_pro_version < 90)
|
||||
if (device->connection->agreed_pro_version < 90)
|
||||
device->ov_start_sector = 0;
|
||||
device->rs_total = drbd_bm_bits(device);
|
||||
device->ov_position = 0;
|
||||
|
@ -962,9 +962,9 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
|
|||
this happen...*/
|
||||
|
||||
if (is_valid_state(device, os) == rv)
|
||||
rv = is_valid_soft_transition(os, ns, device->tconn);
|
||||
rv = is_valid_soft_transition(os, ns, device->connection);
|
||||
} else
|
||||
rv = is_valid_soft_transition(os, ns, device->tconn);
|
||||
rv = is_valid_soft_transition(os, ns, device->connection);
|
||||
}
|
||||
|
||||
if (rv < SS_SUCCESS) {
|
||||
|
@ -981,7 +981,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
|
|||
sanitize_state(). Only display it here if we where not called from
|
||||
_conn_request_state() */
|
||||
if (!(flags & CS_DC_SUSP))
|
||||
conn_pr_state_change(device->tconn, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP);
|
||||
conn_pr_state_change(device->connection, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP);
|
||||
|
||||
/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
|
||||
* on the ldev here, to be sure the transition -> D_DISKLESS resp.
|
||||
|
@ -994,25 +994,25 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
|
|||
did_remote = drbd_should_do_remote(device->state);
|
||||
device->state.i = ns.i;
|
||||
should_do_remote = drbd_should_do_remote(device->state);
|
||||
device->tconn->susp = ns.susp;
|
||||
device->tconn->susp_nod = ns.susp_nod;
|
||||
device->tconn->susp_fen = ns.susp_fen;
|
||||
device->connection->susp = ns.susp;
|
||||
device->connection->susp_nod = ns.susp_nod;
|
||||
device->connection->susp_fen = ns.susp_fen;
|
||||
|
||||
/* put replicated vs not-replicated requests in seperate epochs */
|
||||
if (did_remote != should_do_remote)
|
||||
start_new_tl_epoch(device->tconn);
|
||||
start_new_tl_epoch(device->connection);
|
||||
|
||||
if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
|
||||
drbd_print_uuids(device, "attached to UUIDs");
|
||||
|
||||
/* Wake up role changes, that were delayed because of connection establishing */
|
||||
if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
|
||||
no_peer_wf_report_params(device->tconn))
|
||||
clear_bit(STATE_SENT, &device->tconn->flags);
|
||||
no_peer_wf_report_params(device->connection))
|
||||
clear_bit(STATE_SENT, &device->connection->flags);
|
||||
|
||||
wake_up(&device->misc_wait);
|
||||
wake_up(&device->state_wait);
|
||||
wake_up(&device->tconn->ping_wait);
|
||||
wake_up(&device->connection->ping_wait);
|
||||
|
||||
/* Aborted verify run, or we reached the stop sector.
|
||||
* Log the last position, unless end-of-device. */
|
||||
|
@ -1101,21 +1101,21 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
|
|||
|
||||
/* Receiver should clean up itself */
|
||||
if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
|
||||
drbd_thread_stop_nowait(&device->tconn->receiver);
|
||||
drbd_thread_stop_nowait(&device->connection->receiver);
|
||||
|
||||
/* Now the receiver finished cleaning up itself, it should die */
|
||||
if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
|
||||
drbd_thread_stop_nowait(&device->tconn->receiver);
|
||||
drbd_thread_stop_nowait(&device->connection->receiver);
|
||||
|
||||
/* Upon network failure, we need to restart the receiver. */
|
||||
if (os.conn > C_WF_CONNECTION &&
|
||||
ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
|
||||
drbd_thread_restart_nowait(&device->tconn->receiver);
|
||||
drbd_thread_restart_nowait(&device->connection->receiver);
|
||||
|
||||
/* Resume AL writing if we get a connection */
|
||||
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
|
||||
drbd_resume_al(device);
|
||||
device->tconn->connect_cnt++;
|
||||
device->connection->connect_cnt++;
|
||||
}
|
||||
|
||||
/* remember last attach time so request_timer_fn() won't
|
||||
|
@ -1133,7 +1133,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
|
|||
ascw->w.cb = w_after_state_ch;
|
||||
ascw->w.device = device;
|
||||
ascw->done = done;
|
||||
drbd_queue_work(&device->tconn->sender_work, &ascw->w);
|
||||
drbd_queue_work(&device->connection->sender_work, &ascw->w);
|
||||
} else {
|
||||
dev_err(DEV, "Could not kmalloc an ascw\n");
|
||||
}
|
||||
|
@ -1181,7 +1181,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
|
|||
{
|
||||
int rv;
|
||||
|
||||
D_ASSERT(current == device->tconn->worker.task);
|
||||
D_ASSERT(current == device->connection->worker.task);
|
||||
|
||||
/* open coded non-blocking drbd_suspend_io(device); */
|
||||
set_bit(SUSPEND_IO, &device->flags);
|
||||
|
@ -1228,47 +1228,47 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
|
|||
state change. This function might sleep */
|
||||
|
||||
if (ns.susp_nod) {
|
||||
struct drbd_tconn *tconn = device->tconn;
|
||||
struct drbd_connection *connection = device->connection;
|
||||
enum drbd_req_event what = NOTHING;
|
||||
|
||||
spin_lock_irq(&tconn->req_lock);
|
||||
if (os.conn < C_CONNECTED && conn_lowest_conn(tconn) >= C_CONNECTED)
|
||||
spin_lock_irq(&connection->req_lock);
|
||||
if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED)
|
||||
what = RESEND;
|
||||
|
||||
if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
|
||||
conn_lowest_disk(tconn) > D_NEGOTIATING)
|
||||
conn_lowest_disk(connection) > D_NEGOTIATING)
|
||||
what = RESTART_FROZEN_DISK_IO;
|
||||
|
||||
if (tconn->susp_nod && what != NOTHING) {
|
||||
_tl_restart(tconn, what);
|
||||
_conn_request_state(tconn,
|
||||
if (connection->susp_nod && what != NOTHING) {
|
||||
_tl_restart(connection, what);
|
||||
_conn_request_state(connection,
|
||||
(union drbd_state) { { .susp_nod = 1 } },
|
||||
(union drbd_state) { { .susp_nod = 0 } },
|
||||
CS_VERBOSE);
|
||||
}
|
||||
spin_unlock_irq(&tconn->req_lock);
|
||||
spin_unlock_irq(&connection->req_lock);
|
||||
}
|
||||
|
||||
if (ns.susp_fen) {
|
||||
struct drbd_tconn *tconn = device->tconn;
|
||||
struct drbd_connection *connection = device->connection;
|
||||
|
||||
spin_lock_irq(&tconn->req_lock);
|
||||
if (tconn->susp_fen && conn_lowest_conn(tconn) >= C_CONNECTED) {
|
||||
spin_lock_irq(&connection->req_lock);
|
||||
if (connection->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
|
||||
/* case2: The connection was established again: */
|
||||
struct drbd_device *odev;
|
||||
int vnr;
|
||||
|
||||
rcu_read_lock();
|
||||
idr_for_each_entry(&tconn->volumes, odev, vnr)
|
||||
idr_for_each_entry(&connection->volumes, odev, vnr)
|
||||
clear_bit(NEW_CUR_UUID, &odev->flags);
|
||||
rcu_read_unlock();
|
||||
_tl_restart(tconn, RESEND);
|
||||
_conn_request_state(tconn,
|
||||
_tl_restart(connection, RESEND);
|
||||
_conn_request_state(connection,
|
||||
(union drbd_state) { { .susp_fen = 1 } },
|
||||
(union drbd_state) { { .susp_fen = 0 } },
|
||||
CS_VERBOSE);
|
||||
}
|
||||
spin_unlock_irq(&tconn->req_lock);
|
||||
spin_unlock_irq(&connection->req_lock);
|
||||
}
|
||||
|
||||
/* Became sync source. With protocol >= 96, we still need to send out
|
||||
|
@@ -1277,7 +1277,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
      * which is unexpected. */
     if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
         (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
-        device->tconn->agreed_pro_version >= 96 && get_ldev(device)) {
+        device->connection->agreed_pro_version >= 96 && get_ldev(device)) {
         drbd_gen_and_send_sync_uuid(device);
         put_ldev(device);
     }
@@ -1526,7 +1526,7 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
 {
     struct after_conn_state_chg_work *acscw =
         container_of(w, struct after_conn_state_chg_work, w);
-    struct drbd_tconn *tconn = w->tconn;
+    struct drbd_connection *connection = w->connection;
     enum drbd_conns oc = acscw->oc;
     union drbd_state ns_max = acscw->ns_max;
     struct drbd_device *device;
@@ -1536,18 +1536,18 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)

     /* Upon network configuration, we need to start the receiver */
     if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
-        drbd_thread_start(&tconn->receiver);
+        drbd_thread_start(&connection->receiver);

     if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
         struct net_conf *old_conf;

-        mutex_lock(&tconn->conf_update);
-        old_conf = tconn->net_conf;
-        tconn->my_addr_len = 0;
-        tconn->peer_addr_len = 0;
-        rcu_assign_pointer(tconn->net_conf, NULL);
-        conn_free_crypto(tconn);
-        mutex_unlock(&tconn->conf_update);
+        mutex_lock(&connection->conf_update);
+        old_conf = connection->net_conf;
+        connection->my_addr_len = 0;
+        connection->peer_addr_len = 0;
+        rcu_assign_pointer(connection->net_conf, NULL);
+        conn_free_crypto(connection);
+        mutex_unlock(&connection->conf_update);

         synchronize_rcu();
         kfree(old_conf);
@@ -1557,30 +1557,30 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
         /* case1: The outdate peer handler is successful: */
         if (ns_max.pdsk <= D_OUTDATED) {
             rcu_read_lock();
-            idr_for_each_entry(&tconn->volumes, device, vnr) {
+            idr_for_each_entry(&connection->volumes, device, vnr) {
                 if (test_bit(NEW_CUR_UUID, &device->flags)) {
                     drbd_uuid_new_current(device);
                     clear_bit(NEW_CUR_UUID, &device->flags);
                 }
             }
             rcu_read_unlock();
-            spin_lock_irq(&tconn->req_lock);
-            _tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
-            _conn_request_state(tconn,
+            spin_lock_irq(&connection->req_lock);
+            _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
+            _conn_request_state(connection,
                 (union drbd_state) { { .susp_fen = 1 } },
                 (union drbd_state) { { .susp_fen = 0 } },
                 CS_VERBOSE);
-            spin_unlock_irq(&tconn->req_lock);
+            spin_unlock_irq(&connection->req_lock);
         }
     }
-    kref_put(&tconn->kref, &conn_destroy);
+    kref_put(&connection->kref, &conn_destroy);

-    conn_md_sync(tconn);
+    conn_md_sync(connection);

     return 0;
 }

-void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
+void conn_old_common_state(struct drbd_connection *connection, union drbd_state *pcs, enum chg_state_flags *pf)
 {
     enum chg_state_flags flags = ~0;
     struct drbd_device *device;
@@ -1588,13 +1588,13 @@ void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum
     union drbd_dev_state os, cs = {
         { .role = R_SECONDARY,
           .peer = R_UNKNOWN,
-          .conn = tconn->cstate,
+          .conn = connection->cstate,
           .disk = D_DISKLESS,
           .pdsk = D_UNKNOWN,
         } };

     rcu_read_lock();
-    idr_for_each_entry(&tconn->volumes, device, vnr) {
+    idr_for_each_entry(&connection->volumes, device, vnr) {
         os = device->state;

         if (first_vol) {
@@ -1626,7 +1626,7 @@ void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum
 }

 static enum drbd_state_rv
-conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+conn_is_valid_transition(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
                          enum chg_state_flags flags)
 {
     enum drbd_state_rv rv = SS_SUCCESS;
@@ -1635,7 +1635,7 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union
     int vnr;

     rcu_read_lock();
-    idr_for_each_entry(&tconn->volumes, device, vnr) {
+    idr_for_each_entry(&connection->volumes, device, vnr) {
         os = drbd_read_state(device);
         ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);

@@ -1653,9 +1653,9 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union
             rv = is_valid_state(device, ns);
             if (rv < SS_SUCCESS) {
                 if (is_valid_state(device, os) == rv)
-                    rv = is_valid_soft_transition(os, ns, tconn);
+                    rv = is_valid_soft_transition(os, ns, connection);
             } else
-                rv = is_valid_soft_transition(os, ns, tconn);
+                rv = is_valid_soft_transition(os, ns, connection);
         }
         if (rv < SS_SUCCESS)
             break;
@@ -1669,7 +1669,7 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union
 }

 void
-conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+conn_set_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
                union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
 {
     union drbd_state ns, os, ns_max = { };
@@ -1688,14 +1688,14 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
         /* remember last connect time so request_timer_fn() won't
          * kill newly established sessions while we are still trying to thaw
          * previously frozen IO */
-        if (tconn->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
-            tconn->last_reconnect_jif = jiffies;
+        if (connection->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
+            connection->last_reconnect_jif = jiffies;

-        tconn->cstate = val.conn;
+        connection->cstate = val.conn;
     }

     rcu_read_lock();
-    idr_for_each_entry(&tconn->volumes, device, vnr) {
+    idr_for_each_entry(&connection->volumes, device, vnr) {
         number_of_volumes++;
         os = drbd_read_state(device);
         ns = apply_mask_val(os, mask, val);
@@ -1733,39 +1733,39 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
         } };
     }

-    ns_min.susp = ns_max.susp = tconn->susp;
-    ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod;
-    ns_min.susp_fen = ns_max.susp_fen = tconn->susp_fen;
+    ns_min.susp = ns_max.susp = connection->susp;
+    ns_min.susp_nod = ns_max.susp_nod = connection->susp_nod;
+    ns_min.susp_fen = ns_max.susp_fen = connection->susp_fen;

     *pns_min = ns_min;
     *pns_max = ns_max;
 }

 static enum drbd_state_rv
-_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
+_conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
 {
     enum drbd_state_rv rv;

-    if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
+    if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags))
         return SS_CW_SUCCESS;

-    if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
+    if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags))
         return SS_CW_FAILED_BY_PEER;

-    rv = conn_is_valid_transition(tconn, mask, val, 0);
-    if (rv == SS_SUCCESS && tconn->cstate == C_WF_REPORT_PARAMS)
+    rv = conn_is_valid_transition(connection, mask, val, 0);
+    if (rv == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS)
         rv = SS_UNKNOWN_ERROR; /* continue waiting */

     return rv;
 }

 enum drbd_state_rv
-_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
                     enum chg_state_flags flags)
 {
     enum drbd_state_rv rv = SS_SUCCESS;
     struct after_conn_state_chg_work *acscw;
-    enum drbd_conns oc = tconn->cstate;
+    enum drbd_conns oc = connection->cstate;
     union drbd_state ns_max, ns_min, os;
     bool have_mutex = false;

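Aside (not part of the patch): _conn_rq_cond() above is the wait condition for the cluster-wide state change below; the peer's answer arrives as one of two flag bits, which the waiter consumes with an atomic test-and-clear so a stale answer cannot leak into the next request. A minimal userspace sketch of that consume-on-read pattern, using C11 atomics in place of the kernel's bitops; the harness and names around it are invented for illustration:

    /* Model of consuming a peer's answer with test-and-clear. Illustrative only. */
    #include <stdatomic.h>
    #include <stdio.h>

    enum { OKAY_BIT = 0, FAIL_BIT = 1 };

    static atomic_uint flags;

    /* userspace equivalent of the kernel's atomic test_and_clear_bit() */
    static int test_and_clear_bit_model(int nr, atomic_uint *addr)
    {
        unsigned int mask = 1u << nr;
        return (atomic_fetch_and(addr, ~mask) & mask) != 0;
    }

    static const char *check_reply(void)
    {
        if (test_and_clear_bit_model(OKAY_BIT, &flags))
            return "peer accepted the state change";
        if (test_and_clear_bit_model(FAIL_BIT, &flags))
            return "peer rejected the state change";
        return "no answer yet, keep waiting";
    }

    int main(void)
    {
        puts(check_reply());                     /* nothing set yet */
        atomic_fetch_or(&flags, 1u << OKAY_BIT); /* "receiver" delivers the ack */
        puts(check_reply());                     /* flag consumed exactly once */
        return 0;
    }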
@@ -1775,7 +1775,7 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
         goto abort;
     }

-    rv = conn_is_valid_transition(tconn, mask, val, flags);
+    rv = conn_is_valid_transition(connection, mask, val, flags);
     if (rv < SS_SUCCESS)
         goto abort;

@@ -1785,38 +1785,38 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
         /* This will be a cluster-wide state change.
          * Need to give up the spinlock, grab the mutex,
          * then send the state change request, ... */
-        spin_unlock_irq(&tconn->req_lock);
-        mutex_lock(&tconn->cstate_mutex);
+        spin_unlock_irq(&connection->req_lock);
+        mutex_lock(&connection->cstate_mutex);
         have_mutex = true;

-        set_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
-        if (conn_send_state_req(tconn, mask, val)) {
+        set_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
+        if (conn_send_state_req(connection, mask, val)) {
             /* sending failed. */
-            clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+            clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
             rv = SS_CW_FAILED_BY_PEER;
             /* need to re-aquire the spin lock, though */
             goto abort_unlocked;
         }

         if (val.conn == C_DISCONNECTING)
-            set_bit(DISCONNECT_SENT, &tconn->flags);
+            set_bit(DISCONNECT_SENT, &connection->flags);

         /* ... and re-aquire the spinlock.
          * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
          * conn_set_state() within the same spinlock. */
-        spin_lock_irq(&tconn->req_lock);
-        wait_event_lock_irq(tconn->ping_wait,
-            (rv = _conn_rq_cond(tconn, mask, val)),
-            tconn->req_lock);
-        clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
+        spin_lock_irq(&connection->req_lock);
+        wait_event_lock_irq(connection->ping_wait,
+            (rv = _conn_rq_cond(connection, mask, val)),
+            connection->req_lock);
+        clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
         if (rv < SS_SUCCESS)
             goto abort;
     }

-    conn_old_common_state(tconn, &os, &flags);
+    conn_old_common_state(connection, &os, &flags);
     flags |= CS_DC_SUSP;
-    conn_set_state(tconn, mask, val, &ns_min, &ns_max, flags);
-    conn_pr_state_change(tconn, os, ns_max, flags);
+    conn_set_state(connection, mask, val, &ns_min, &ns_max, flags);
+    conn_pr_state_change(connection, os, ns_max, flags);

     acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
     if (acscw) {
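Aside (not part of the patch): the hunk above preserves DRBD's long-standing lock-ordering dance. A spinlock must not be held while sleeping, so a cluster-wide change drops req_lock, takes cstate_mutex, sends the request, and only then re-acquires the spinlock to wait and commit. A minimal userspace model of that ordering, with pthread primitives standing in for the kernel's spinlock and mutex; all names here are illustrative:

    /* Model of "drop spinlock, take mutex, retake spinlock". Illustrative only. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t req_lock;  /* stands in for connection->req_lock */
    static pthread_mutex_t cstate_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void cluster_wide_change(void)
    {
        pthread_spin_lock(&req_lock);
        /* ... decide under the spinlock that a cluster-wide change is needed ... */

        /* the next step may sleep, so the spinlock must go first */
        pthread_spin_unlock(&req_lock);
        pthread_mutex_lock(&cstate_mutex);

        /* ... send the state change request, wait for the peer's answer ... */

        pthread_spin_lock(&req_lock);
        /* ... commit the agreed state under the spinlock ... */
        pthread_spin_unlock(&req_lock);
        pthread_mutex_unlock(&cstate_mutex);
    }

    int main(void)
    {
        pthread_spin_init(&req_lock, PTHREAD_PROCESS_PRIVATE);
        cluster_wide_change();
        puts("state change committed");
        return 0;
    }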
@@ -1825,39 +1825,39 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
         acscw->ns_max = ns_max;
         acscw->flags = flags;
         acscw->w.cb = w_after_conn_state_ch;
-        kref_get(&tconn->kref);
-        acscw->w.tconn = tconn;
-        drbd_queue_work(&tconn->sender_work, &acscw->w);
+        kref_get(&connection->kref);
+        acscw->w.connection = connection;
+        drbd_queue_work(&connection->sender_work, &acscw->w);
     } else {
-        conn_err(tconn, "Could not kmalloc an acscw\n");
+        conn_err(connection, "Could not kmalloc an acscw\n");
     }

 abort:
     if (have_mutex) {
         /* mutex_unlock() "... must not be used in interrupt context.",
          * so give up the spinlock, then re-aquire it */
-        spin_unlock_irq(&tconn->req_lock);
+        spin_unlock_irq(&connection->req_lock);
 abort_unlocked:
-        mutex_unlock(&tconn->cstate_mutex);
-        spin_lock_irq(&tconn->req_lock);
+        mutex_unlock(&connection->cstate_mutex);
+        spin_lock_irq(&connection->req_lock);
     }
     if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
-        conn_err(tconn, "State change failed: %s\n", drbd_set_st_err_str(rv));
-        conn_err(tconn, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
-        conn_err(tconn, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
+        conn_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
+        conn_err(connection, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
+        conn_err(connection, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
     }
     return rv;
 }

 enum drbd_state_rv
-conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
                    enum chg_state_flags flags)
 {
     enum drbd_state_rv rv;

-    spin_lock_irq(&tconn->req_lock);
-    rv = _conn_request_state(tconn, mask, val, flags);
-    spin_unlock_irq(&tconn->req_lock);
+    spin_lock_irq(&connection->req_lock);
+    rv = _conn_request_state(connection, mask, val, flags);
+    spin_unlock_irq(&connection->req_lock);

     return rv;
 }
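Aside (not part of the patch): note the pairing in the hunks above: kref_get() on the connection right before the acscw work item is queued, and the matching kref_put() at the end of w_after_conn_state_ch(). The queued work owns a reference, so the connection cannot be freed while the worker still has the callback pending. A tiny userspace analogue with a plain atomic refcount; the struct and function names are made up for illustration:

    /* Model of "take a reference before queueing deferred work, drop it in
     * the callback". Illustrative only. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct conn {
        atomic_int refs;
    };

    static void conn_put(struct conn *c)
    {
        if (atomic_fetch_sub(&c->refs, 1) == 1) {  /* dropped the last ref */
            printf("last reference dropped, freeing\n");
            free(c);
        }
    }

    /* stands in for w_after_conn_state_ch() */
    static void deferred_work(struct conn *c)
    {
        /* safe to use c here: the queued work owns a reference */
        conn_put(c);  /* matches the get done at queue time */
    }

    int main(void)
    {
        struct conn *c = malloc(sizeof(*c));
        atomic_init(&c->refs, 1);       /* creator's reference */

        atomic_fetch_add(&c->refs, 1);  /* "kref_get()" before queueing */
        deferred_work(c);               /* the "worker" runs the callback */

        conn_put(c);                    /* creator drops its own reference */
        return 0;
    }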
@@ -2,7 +2,7 @@
 #define DRBD_STATE_H

 struct drbd_device;
-struct drbd_tconn;
+struct drbd_connection;

 /**
  * DOC: DRBD State macros
@@ -124,15 +124,15 @@ extern void print_st_err(struct drbd_device *, union drbd_state,
                          union drbd_state, int);

 enum drbd_state_rv
-_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
                     enum chg_state_flags flags);

 enum drbd_state_rv
-conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
+conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
                    enum chg_state_flags flags);

 extern void drbd_resume_al(struct drbd_device *device);
-extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
+extern bool conn_all_vols_unconf(struct drbd_connection *connection);

 /**
  * drbd_request_state() - Reqest a state change
@@ -151,11 +151,11 @@ static inline int drbd_request_state(struct drbd_device *device,
     return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED);
 }

-enum drbd_role conn_highest_role(struct drbd_tconn *tconn);
-enum drbd_role conn_highest_peer(struct drbd_tconn *tconn);
-enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn);
-enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn);
-enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn);
-enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn);
+enum drbd_role conn_highest_role(struct drbd_connection *connection);
+enum drbd_role conn_highest_peer(struct drbd_connection *connection);
+enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection);
+enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection);
+enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection);
+enum drbd_conns conn_lowest_conn(struct drbd_connection *connection);

 #endif
@@ -102,16 +102,16 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele
     unsigned long flags = 0;
     struct drbd_device *device = peer_req->w.device;

-    spin_lock_irqsave(&device->tconn->req_lock, flags);
+    spin_lock_irqsave(&device->connection->req_lock, flags);
     device->read_cnt += peer_req->i.size >> 9;
     list_del(&peer_req->w.list);
     if (list_empty(&device->read_ee))
         wake_up(&device->ee_wait);
     if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
         __drbd_chk_io_error(device, DRBD_READ_ERROR);
-    spin_unlock_irqrestore(&device->tconn->req_lock, flags);
+    spin_unlock_irqrestore(&device->connection->req_lock, flags);

-    drbd_queue_work(&device->tconn->sender_work, &peer_req->w);
+    drbd_queue_work(&device->connection->sender_work, &peer_req->w);
     put_ldev(device);
 }

@@ -134,7 +134,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
     do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
     block_id = peer_req->block_id;

-    spin_lock_irqsave(&device->tconn->req_lock, flags);
+    spin_lock_irqsave(&device->connection->req_lock, flags);
     device->writ_cnt += peer_req->i.size >> 9;
     list_move_tail(&peer_req->w.list, &device->done_ee);

@@ -150,7 +150,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel

     if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
         __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
-    spin_unlock_irqrestore(&device->tconn->req_lock, flags);
+    spin_unlock_irqrestore(&device->connection->req_lock, flags);

     if (block_id == ID_SYNCER)
         drbd_rs_complete_io(device, i.sector);
@@ -161,7 +161,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
     if (do_al_complete_io)
         drbd_al_complete_io(device, &i);

-    wake_asender(device->tconn);
+    wake_asender(device->connection);
     put_ldev(device);
 }

@@ -273,9 +273,9 @@ void drbd_request_endio(struct bio *bio, int error)
     req->private_bio = ERR_PTR(error);

     /* not req_mod(), we need irqsave here! */
-    spin_lock_irqsave(&device->tconn->req_lock, flags);
+    spin_lock_irqsave(&device->connection->req_lock, flags);
     __req_mod(req, what, &m);
-    spin_unlock_irqrestore(&device->tconn->req_lock, flags);
+    spin_unlock_irqrestore(&device->connection->req_lock, flags);
     put_ldev(device);

     if (m.bio)
@@ -345,12 +345,12 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
     if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
         goto out;

-    digest_size = crypto_hash_digestsize(device->tconn->csums_tfm);
+    digest_size = crypto_hash_digestsize(device->connection->csums_tfm);
     digest = kmalloc(digest_size, GFP_NOIO);
     if (digest) {
         sector_t sector = peer_req->i.sector;
         unsigned int size = peer_req->i.size;
-        drbd_csum_ee(device, device->tconn->csums_tfm, peer_req, digest);
+        drbd_csum_ee(device, device->connection->csums_tfm, peer_req, digest);
         /* Free peer_req and pages before send.
          * In case we block on congestion, we could otherwise run into
          * some distributed deadlock, if the other side blocks on
@@ -397,9 +397,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
         goto defer;

     peer_req->w.cb = w_e_send_csum;
-    spin_lock_irq(&device->tconn->req_lock);
+    spin_lock_irq(&device->connection->req_lock);
     list_add(&peer_req->w.list, &device->read_ee);
-    spin_unlock_irq(&device->tconn->req_lock);
+    spin_unlock_irq(&device->connection->req_lock);

     atomic_add(size >> 9, &device->rs_sect_ev);
     if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
@@ -409,9 +409,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
      * because bio_add_page failed (probably broken lower level driver),
      * retry may or may not help.
      * If it does not, you may need to force disconnect. */
-    spin_lock_irq(&device->tconn->req_lock);
+    spin_lock_irq(&device->connection->req_lock);
     list_del(&peer_req->w.list);
-    spin_unlock_irq(&device->tconn->req_lock);
+    spin_unlock_irq(&device->connection->req_lock);

     drbd_free_peer_req(device, peer_req);
 defer:
@@ -439,7 +439,7 @@ void resync_timer_fn(unsigned long data)
     struct drbd_device *device = (struct drbd_device *) data;

     if (list_empty(&device->resync_work.list))
-        drbd_queue_work(&device->tconn->sender_work, &device->resync_work);
+        drbd_queue_work(&device->connection->sender_work, &device->resync_work);
 }

 static void fifo_set(struct fifo_buffer *fb, int value)
@@ -597,15 +597,15 @@ int w_make_resync_request(struct drbd_work *w, int cancel)

     for (i = 0; i < number; i++) {
         /* Stop generating RS requests, when half of the send buffer is filled */
-        mutex_lock(&device->tconn->data.mutex);
-        if (device->tconn->data.socket) {
-            queued = device->tconn->data.socket->sk->sk_wmem_queued;
-            sndbuf = device->tconn->data.socket->sk->sk_sndbuf;
+        mutex_lock(&device->connection->data.mutex);
+        if (device->connection->data.socket) {
+            queued = device->connection->data.socket->sk->sk_wmem_queued;
+            sndbuf = device->connection->data.socket->sk->sk_sndbuf;
         } else {
             queued = 1;
             sndbuf = 0;
         }
-        mutex_unlock(&device->tconn->data.mutex);
+        mutex_unlock(&device->connection->data.mutex);
         if (queued > sndbuf / 2)
             goto requeue;

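Aside (not part of the patch): the loop above paces resync traffic by backing off once the socket's send queue is half full, so resync never starves application I/O of socket buffer space. A standalone sketch of that threshold check; the struct is a stand-in for the kernel socket fields named in the diff, and the numbers are arbitrary:

    /* Model of the resync pacing check. Illustrative only. */
    #include <stdbool.h>
    #include <stdio.h>

    struct sock_model {
        int wmem_queued;  /* bytes currently queued, like sk_wmem_queued */
        int sndbuf;       /* configured buffer size, like sk_sndbuf */
    };

    static bool should_requeue(const struct sock_model *sk)
    {
        /* back off once more than half the send buffer is in flight */
        return sk->wmem_queued > sk->sndbuf / 2;
    }

    int main(void)
    {
        struct sock_model sk = { .wmem_queued = 70000, .sndbuf = 131072 };

        if (should_requeue(&sk))
            puts("send buffer half full: requeue, try again later");
        else
            puts("room available: generate next resync request");
        return 0;
    }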
@@ -675,7 +675,7 @@ int w_make_resync_request(struct drbd_work *w, int cancel)
         /* adjust very last sectors, in case we are oddly sized */
         if (sector + (size>>9) > capacity)
             size = (capacity-sector)<<9;
-        if (device->tconn->agreed_pro_version >= 89 && device->tconn->csums_tfm) {
+        if (device->connection->agreed_pro_version >= 89 && device->connection->csums_tfm) {
             switch (read_for_csum(device, sector, size)) {
             case -EIO: /* Disk failure */
                 put_ldev(device);
@@ -800,12 +800,12 @@ static int w_resync_finished(struct drbd_work *w, int cancel)

 static void ping_peer(struct drbd_device *device)
 {
-    struct drbd_tconn *tconn = device->tconn;
+    struct drbd_connection *connection = device->connection;

-    clear_bit(GOT_PING_ACK, &tconn->flags);
-    request_ping(tconn);
-    wait_event(tconn->ping_wait,
-        test_bit(GOT_PING_ACK, &tconn->flags) || device->state.conn < C_CONNECTED);
+    clear_bit(GOT_PING_ACK, &connection->flags);
+    request_ping(connection);
+    wait_event(connection->ping_wait,
+        test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
 }

 int drbd_resync_finished(struct drbd_device *device)
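Aside (not part of the patch): ping_peer() above is a classic clear-flag, send, sleep-until-flag sequence: the flag is cleared before the ping goes out so that only an ack to this ping (or a connection drop) can wake the waiter. A userspace model of the same sequence using a condition variable in place of the kernel wait queue; the thread harness is invented for illustration:

    /* Model of ping_peer(): clear the ack flag, "send" a ping, sleep until
     * the ack arrives or the connection drops. Illustrative only. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t ping_wait = PTHREAD_COND_INITIALIZER;
    static bool got_ping_ack;
    static bool connected = true;

    /* stands in for the receiver thread delivering P_PING_ACK */
    static void *receiver(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        got_ping_ack = true;
        pthread_cond_signal(&ping_wait);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    static void ping_peer(void)
    {
        pthread_t t;

        pthread_mutex_lock(&lock);
        got_ping_ack = false;                      /* clear before sending */
        pthread_create(&t, NULL, receiver, NULL);  /* "request_ping()" */
        while (!got_ping_ack && connected)
            pthread_cond_wait(&ping_wait, &lock);  /* like wait_event() */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
    }

    int main(void)
    {
        ping_peer();
        puts("ping acked");
        return 0;
    }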
@@ -831,7 +831,7 @@ int drbd_resync_finished(struct drbd_device *device)
     if (w) {
         w->cb = w_resync_finished;
         w->device = device;
-        drbd_queue_work(&device->tconn->sender_work, w);
+        drbd_queue_work(&device->connection->sender_work, w);
         return 1;
     }
     dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
@@ -854,7 +854,7 @@ int drbd_resync_finished(struct drbd_device *device)

     ping_peer(device);

-    spin_lock_irq(&device->tconn->req_lock);
+    spin_lock_irq(&device->connection->req_lock);
     os = drbd_read_state(device);

     verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
@@ -885,7 +885,7 @@ int drbd_resync_finished(struct drbd_device *device)
     if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
         khelper_cmd = "after-resync-target";

-    if (device->tconn->csums_tfm && device->rs_total) {
+    if (device->connection->csums_tfm && device->rs_total) {
         const unsigned long s = device->rs_same_csum;
         const unsigned long t = device->rs_total;
         const int ratio =
@@ -943,7 +943,7 @@ int drbd_resync_finished(struct drbd_device *device)

     _drbd_set_state(device, ns, CS_VERBOSE, NULL);
 out_unlock:
-    spin_unlock_irq(&device->tconn->req_lock);
+    spin_unlock_irq(&device->connection->req_lock);
     put_ldev(device);
 out:
     device->rs_total = 0;
@@ -970,9 +970,9 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_
         int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
         atomic_add(i, &device->pp_in_use_by_net);
         atomic_sub(i, &device->pp_in_use);
-        spin_lock_irq(&device->tconn->req_lock);
+        spin_lock_irq(&device->connection->req_lock);
         list_add_tail(&peer_req->w.list, &device->net_ee);
-        spin_unlock_irq(&device->tconn->req_lock);
+        spin_unlock_irq(&device->connection->req_lock);
         wake_up(&drbd_pp_wait);
     } else
         drbd_free_peer_req(device, peer_req);
@@ -1096,13 +1096,13 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
         /* quick hack to try to avoid a race against reconfiguration.
          * a real fix would be much more involved,
          * introducing more locking mechanisms */
-        if (device->tconn->csums_tfm) {
-            digest_size = crypto_hash_digestsize(device->tconn->csums_tfm);
+        if (device->connection->csums_tfm) {
+            digest_size = crypto_hash_digestsize(device->connection->csums_tfm);
             D_ASSERT(digest_size == di->digest_size);
             digest = kmalloc(digest_size, GFP_NOIO);
         }
         if (digest) {
-            drbd_csum_ee(device, device->tconn->csums_tfm, peer_req, digest);
+            drbd_csum_ee(device, device->connection->csums_tfm, peer_req, digest);
             eq = !memcmp(digest, di->digest, digest_size);
             kfree(digest);
         }
@@ -1146,7 +1146,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
     if (unlikely(cancel))
         goto out;

-    digest_size = crypto_hash_digestsize(device->tconn->verify_tfm);
+    digest_size = crypto_hash_digestsize(device->connection->verify_tfm);
     digest = kmalloc(digest_size, GFP_NOIO);
     if (!digest) {
         err = 1; /* terminate the connection in case the allocation failed */
@@ -1154,7 +1154,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
     }

     if (likely(!(peer_req->flags & EE_WAS_ERROR)))
-        drbd_csum_ee(device, device->tconn->verify_tfm, peer_req, digest);
+        drbd_csum_ee(device, device->connection->verify_tfm, peer_req, digest);
     else
         memset(digest, 0, digest_size);

@@ -1217,10 +1217,10 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
     di = peer_req->digest;

     if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
-        digest_size = crypto_hash_digestsize(device->tconn->verify_tfm);
+        digest_size = crypto_hash_digestsize(device->connection->verify_tfm);
         digest = kmalloc(digest_size, GFP_NOIO);
         if (digest) {
-            drbd_csum_ee(device, device->tconn->verify_tfm, peer_req, digest);
+            drbd_csum_ee(device, device->connection->verify_tfm, peer_req, digest);

             D_ASSERT(digest_size == di->digest_size);
             eq = !memcmp(digest, di->digest, digest_size);
@@ -1274,20 +1274,20 @@ int w_prev_work_done(struct drbd_work *w, int cancel)
  * and to be able to wait for them.
  * See also comment in drbd_adm_attach before drbd_suspend_io.
  */
-static int drbd_send_barrier(struct drbd_tconn *tconn)
+static int drbd_send_barrier(struct drbd_connection *connection)
 {
     struct p_barrier *p;
     struct drbd_socket *sock;

-    sock = &tconn->data;
-    p = conn_prepare_command(tconn, sock);
+    sock = &connection->data;
+    p = conn_prepare_command(connection, sock);
     if (!p)
         return -EIO;
-    p->barrier = tconn->send.current_epoch_nr;
+    p->barrier = connection->send.current_epoch_nr;
     p->pad = 0;
-    tconn->send.current_epoch_writes = 0;
+    connection->send.current_epoch_writes = 0;

-    return conn_send_command(tconn, sock, P_BARRIER, sizeof(*p), NULL, 0);
+    return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
 }

 int w_send_write_hint(struct drbd_work *w, int cancel)
@@ -1297,30 +1297,30 @@ int w_send_write_hint(struct drbd_work *w, int cancel)

     if (cancel)
         return 0;
-    sock = &device->tconn->data;
+    sock = &device->connection->data;
     if (!drbd_prepare_command(device, sock))
         return -EIO;
     return drbd_send_command(device, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
 }

-static void re_init_if_first_write(struct drbd_tconn *tconn, unsigned int epoch)
+static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
 {
-    if (!tconn->send.seen_any_write_yet) {
-        tconn->send.seen_any_write_yet = true;
-        tconn->send.current_epoch_nr = epoch;
-        tconn->send.current_epoch_writes = 0;
+    if (!connection->send.seen_any_write_yet) {
+        connection->send.seen_any_write_yet = true;
+        connection->send.current_epoch_nr = epoch;
+        connection->send.current_epoch_writes = 0;
     }
 }

-static void maybe_send_barrier(struct drbd_tconn *tconn, unsigned int epoch)
+static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch)
 {
     /* re-init if first write on this connection */
-    if (!tconn->send.seen_any_write_yet)
+    if (!connection->send.seen_any_write_yet)
         return;
-    if (tconn->send.current_epoch_nr != epoch) {
-        if (tconn->send.current_epoch_writes)
-            drbd_send_barrier(tconn);
-        tconn->send.current_epoch_nr = epoch;
+    if (connection->send.current_epoch_nr != epoch) {
+        if (connection->send.current_epoch_writes)
+            drbd_send_barrier(connection);
+        connection->send.current_epoch_nr = epoch;
     }
 }

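Aside (not part of the patch): re_init_if_first_write() and maybe_send_barrier() above implement simple epoch bookkeeping: a P_BARRIER closes the previous write epoch, but only if that epoch actually saw writes. A standalone sketch of the same bookkeeping, with the network send replaced by a printf; the fields mirror the diff's send state, while the main() harness is invented:

    /* Standalone model of DRBD's epoch/barrier bookkeeping. Illustrative only. */
    #include <stdbool.h>
    #include <stdio.h>

    struct send_state {
        bool seen_any_write_yet;
        unsigned int current_epoch_nr;
        unsigned int current_epoch_writes;
    };

    static void send_barrier(struct send_state *s)
    {
        printf("P_BARRIER closing epoch %u\n", s->current_epoch_nr);
        s->current_epoch_writes = 0;
    }

    static void re_init_if_first_write(struct send_state *s, unsigned int epoch)
    {
        if (!s->seen_any_write_yet) {
            s->seen_any_write_yet = true;
            s->current_epoch_nr = epoch;
            s->current_epoch_writes = 0;
        }
    }

    static void maybe_send_barrier(struct send_state *s, unsigned int epoch)
    {
        if (!s->seen_any_write_yet)
            return;
        if (s->current_epoch_nr != epoch) {
            if (s->current_epoch_writes)  /* only close non-empty epochs */
                send_barrier(s);
            s->current_epoch_nr = epoch;
        }
    }

    int main(void)
    {
        struct send_state s = { 0 };

        re_init_if_first_write(&s, 1);
        maybe_send_barrier(&s, 1);  /* same epoch: no barrier */
        s.current_epoch_writes++;   /* one write lands in epoch 1 */
        maybe_send_barrier(&s, 2);  /* epoch changed: barrier for epoch 1 */
        return 0;
    }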
@@ -1328,7 +1328,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
 {
     struct drbd_request *req = container_of(w, struct drbd_request, w);
     struct drbd_device *device = w->device;
-    struct drbd_tconn *tconn = device->tconn;
+    struct drbd_connection *connection = device->connection;
     int err;

     if (unlikely(cancel)) {
@@ -1336,11 +1336,11 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
         return 0;
     }

-    /* this time, no tconn->send.current_epoch_writes++;
+    /* this time, no connection->send.current_epoch_writes++;
      * If it was sent, it was the closing barrier for the last
      * replicated epoch, before we went into AHEAD mode.
      * No more barriers will be sent, until we leave AHEAD mode again. */
-    maybe_send_barrier(tconn, req->epoch);
+    maybe_send_barrier(connection, req->epoch);

     err = drbd_send_out_of_sync(device, req);
     req_mod(req, OOS_HANDED_TO_NETWORK);
@@ -1358,7 +1358,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
 {
     struct drbd_request *req = container_of(w, struct drbd_request, w);
     struct drbd_device *device = w->device;
-    struct drbd_tconn *tconn = device->tconn;
+    struct drbd_connection *connection = device->connection;
     int err;

     if (unlikely(cancel)) {
@@ -1366,9 +1366,9 @@ int w_send_dblock(struct drbd_work *w, int cancel)
         return 0;
     }

-    re_init_if_first_write(tconn, req->epoch);
-    maybe_send_barrier(tconn, req->epoch);
-    tconn->send.current_epoch_writes++;
+    re_init_if_first_write(connection, req->epoch);
+    maybe_send_barrier(connection, req->epoch);
+    connection->send.current_epoch_writes++;

     err = drbd_send_dblock(device, req);
     req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
@@ -1386,7 +1386,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
 {
     struct drbd_request *req = container_of(w, struct drbd_request, w);
     struct drbd_device *device = w->device;
-    struct drbd_tconn *tconn = device->tconn;
+    struct drbd_connection *connection = device->connection;
     int err;

     if (unlikely(cancel)) {
@@ -1396,7 +1396,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)

     /* Even read requests may close a write epoch,
      * if there was any yet. */
-    maybe_send_barrier(tconn, req->epoch);
+    maybe_send_barrier(connection, req->epoch);

     err = drbd_send_drequest(device, P_DATA_REQUEST, req->i.sector, req->i.size,
                              (unsigned long)req);
@@ -1581,7 +1581,7 @@ void start_resync_timer_fn(unsigned long data)
 {
     struct drbd_device *device = (struct drbd_device *) data;

-    drbd_queue_work(&device->tconn->sender_work, &device->start_resync_work);
+    drbd_queue_work(&device->connection->sender_work, &device->start_resync_work);
 }

 int w_start_resync(struct drbd_work *w, int cancel)
@@ -1628,7 +1628,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
         if (r > 0) {
             dev_info(DEV, "before-resync-target handler returned %d, "
                 "dropping connection.\n", r);
-            conn_request_state(device->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+            conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
             return;
         }
     } else /* C_SYNC_SOURCE */ {
@@ -1641,14 +1641,14 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
             } else {
                 dev_info(DEV, "before-resync-source handler returned %d, "
                     "dropping connection.\n", r);
-                conn_request_state(device->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
+                conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
                 return;
             }
         }
     }
 }

-    if (current == device->tconn->worker.task) {
+    if (current == device->connection->worker.task) {
         /* The worker should not sleep waiting for state_mutex,
            that can take long */
         if (!mutex_trylock(device->state_mutex)) {
@@ -1727,10 +1727,10 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
      * drbd_resync_finished from here in that case.
      * We drbd_gen_and_send_sync_uuid here for protocol < 96,
      * and from after_state_ch otherwise. */
-    if (side == C_SYNC_SOURCE && device->tconn->agreed_pro_version < 96)
+    if (side == C_SYNC_SOURCE && device->connection->agreed_pro_version < 96)
         drbd_gen_and_send_sync_uuid(device);

-    if (device->tconn->agreed_pro_version < 95 && device->rs_total == 0) {
+    if (device->connection->agreed_pro_version < 95 && device->rs_total == 0) {
         /* This still has a race (about when exactly the peers
          * detect connection loss) that can lead to a full sync
          * on next handshake. In 8.3.9 we fixed this with explicit
@@ -1746,7 +1746,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
             int timeo;

             rcu_read_lock();
-            nc = rcu_dereference(device->tconn->net_conf);
+            nc = rcu_dereference(device->connection->net_conf);
             timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
             rcu_read_unlock();
             schedule_timeout_interruptible(timeo);
@@ -1772,7 +1772,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
  * (because we have not yet seen new requests), we should send the
  * corresponding barrier now. Must be checked within the same spinlock
  * that is used to check for new requests. */
-static bool need_to_send_barrier(struct drbd_tconn *connection)
+static bool need_to_send_barrier(struct drbd_connection *connection)
 {
     if (!connection->send.seen_any_write_yet)
         return false;
@@ -1813,7 +1813,7 @@ static bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *w
     return !list_empty(work_list);
 }

-static void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
+static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list)
 {
     DEFINE_WAIT(wait);
     struct net_conf *nc;
@@ -1884,7 +1884,7 @@ static void wait_for_work(struct drbd_tconn *connection, struct list_head *work_

 int drbd_worker(struct drbd_thread *thi)
 {
-    struct drbd_tconn *tconn = thi->tconn;
+    struct drbd_connection *connection = thi->connection;
     struct drbd_work *w = NULL;
     struct drbd_device *device;
     LIST_HEAD(work_list);
@@ -1896,12 +1896,12 @@ int drbd_worker(struct drbd_thread *thi)
         /* as long as we use drbd_queue_work_front(),
          * we may only dequeue single work items here, not batches. */
         if (list_empty(&work_list))
-            wait_for_work(tconn, &work_list);
+            wait_for_work(connection, &work_list);

         if (signal_pending(current)) {
             flush_signals(current);
             if (get_t_state(thi) == RUNNING) {
-                conn_warn(tconn, "Worker got an unexpected signal\n");
+                conn_warn(connection, "Worker got an unexpected signal\n");
                 continue;
             }
             break;
@@ -1913,10 +1913,10 @@ int drbd_worker(struct drbd_thread *thi)
         while (!list_empty(&work_list)) {
             w = list_first_entry(&work_list, struct drbd_work, list);
             list_del_init(&w->list);
-            if (w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS) == 0)
+            if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
                 continue;
-            if (tconn->cstate >= C_WF_REPORT_PARAMS)
-                conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
+            if (connection->cstate >= C_WF_REPORT_PARAMS)
+                conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
         }
     }

@@ -1926,11 +1926,11 @@ int drbd_worker(struct drbd_thread *thi)
             list_del_init(&w->list);
             w->cb(w, 1);
         }
-        dequeue_work_batch(&tconn->sender_work, &work_list);
+        dequeue_work_batch(&connection->sender_work, &work_list);
     } while (!list_empty(&work_list));

     rcu_read_lock();
-    idr_for_each_entry(&tconn->volumes, device, vnr) {
+    idr_for_each_entry(&connection->volumes, device, vnr) {
         D_ASSERT(device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
         kref_get(&device->kref);
         rcu_read_unlock();
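Aside (not part of the patch): the worker loop above applies one dispatch rule throughout: each dequeued callback receives a "cancel" argument derived from the connection state, and a failing callback escalates the connection. A compact userspace model of that rule; the work items and callbacks are invented for illustration:

    /* Model of the worker's dispatch rule: run callbacks with a cancel flag,
     * escalate on error so later items see cancel set. Illustrative only. */
    #include <stdbool.h>
    #include <stdio.h>

    struct work {
        int (*cb)(struct work *, int cancel);
        struct work *next;
    };

    static int say_hello(struct work *w, int cancel)
    {
        (void)w;
        printf(cancel ? "cancelled\n" : "did work\n");
        return 0;
    }

    static int fail_once(struct work *w, int cancel)
    {
        (void)w; (void)cancel;
        return -1;  /* callback failure */
    }

    int main(void)
    {
        struct work w2 = { say_hello, NULL };
        struct work w1 = { fail_once, &w2 };
        struct work *head = &w1;
        bool connected = true;  /* stands in for cstate >= C_WF_REPORT_PARAMS */

        while (head) {
            struct work *w = head;
            head = head->next;
            if (w->cb(w, !connected) != 0)
                connected = false;  /* escalate, like C_NETWORK_FAILURE */
        }
        return 0;
    }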