GFS2: Add origin indicator to glock callbacks
This patch adds a bool indicating whether the demote request was originated locally or remotely. This is then used by the iopen ->go_callback() to make 100% sure that it will only respond to remote callbacks. Since ->evict_inode() uses GL_NOCACHE when it attempts to get an exclusive lock on the iopen lock, this may result in extra scheduling of the workqueue in case that the exclusive promotion request failed. This patch prevents that from happening. Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Parent commit: 16ca9412d8. This commit: 81ffbf654f. 3 changed files, 9 additions and 9 deletions.
@@ -912,7 +912,7 @@ int gfs2_glock_wait(struct gfs2_holder *gh)
  */

 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
-			    unsigned long delay)
+			    unsigned long delay, bool remote)
 {
 	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

@@ -925,7 +925,7 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 		gl->gl_demote_state = LM_ST_UNLOCKED;
 	}
 	if (gl->gl_ops->go_callback)
-		gl->gl_ops->go_callback(gl);
+		gl->gl_ops->go_callback(gl, remote);
 	trace_gfs2_demote_rq(gl);
 }

@@ -1091,7 +1091,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)

 	spin_lock(&gl->gl_spin);
 	if (gh->gh_flags & GL_NOCACHE)
-		handle_callback(gl, LM_ST_UNLOCKED, 0);
+		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

 	list_del_init(&gh->gh_list);
 	if (find_first_holder(gl) == NULL) {
@@ -1296,7 +1296,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 	}

 	spin_lock(&gl->gl_spin);
-	handle_callback(gl, state, delay);
+	handle_callback(gl, state, delay, true);
 	spin_unlock(&gl->gl_spin);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 		gfs2_glock_put(gl);
@@ -1409,7 +1409,7 @@ __acquires(&lru_lock)
 		spin_unlock(&lru_lock);
 		spin_lock(&gl->gl_spin);
 		if (demote_ok(gl))
-			handle_callback(gl, LM_ST_UNLOCKED, 0);
+			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
 		smp_mb__after_clear_bit();
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
@@ -1534,7 +1534,7 @@ static void clear_glock(struct gfs2_glock *gl)

 	spin_lock(&gl->gl_spin);
 	if (gl->gl_state != LM_ST_UNLOCKED)
-		handle_callback(gl, LM_ST_UNLOCKED, 0);
+		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 	spin_unlock(&gl->gl_spin);
 	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
@@ -515,12 +515,12 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
  *
  * gl_spin lock is held while calling this
  */
-static void iopen_go_callback(struct gfs2_glock *gl)
+static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
 	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
 	struct gfs2_sbd *sdp = gl->gl_sbd;

-	if (sdp->sd_vfs->s_flags & MS_RDONLY)
+	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
 		return;

 	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
@@ -210,7 +210,7 @@ struct gfs2_glock_operations {
 	int (*go_lock) (struct gfs2_holder *gh);
 	void (*go_unlock) (struct gfs2_holder *gh);
 	int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
-	void (*go_callback) (struct gfs2_glock *gl);
+	void (*go_callback)(struct gfs2_glock *gl, bool remote);
 	const int go_type;
 	const unsigned long go_flags;
 #define GLOF_ASPACE 1
Loading…
Reference in a new issue