Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw:
  [GFS2] Fix GFS2's use of do_div() in its quota calculations
  [GFS2] Remove unused declaration
  [GFS2] Remove support for unused and pointless flag
  [GFS2] Replace rgrp "recent list" with mru list
  [GFS2] Allow local DF locks when holding a cached EX glock
  [GFS2] Fix delayed demote race
  [GFS2] don't call permission()
  [GFS2] Fix module building
  [GFS2] Glock documentation
  [GFS2] Remove all_list from lock_dlm
  [GFS2] Remove obsolete conversion deadlock avoidance code
  [GFS2] Remove remote lock dropping code
  [GFS2] kernel panic mounting volume
  [GFS2] Revise readpage locking
  [GFS2] Fix ordering of args for list_add
  [GFS2] trivial sparse lock annotations
  [GFS2] No lock_nolock
  [GFS2] Fix ordering bug in lock_dlm
  [GFS2] Clean up the glock core

commit 38c46578ff
34 changed files with 1278 additions and 1955 deletions
Documentation/filesystems/gfs2-glocks.txt | 114 lines (new file)

@@ -0,0 +1,114 @@

                   Glock internal locking rules
                  ------------------------------

This documents the basic principles of the glock state machine
internals. Each glock (struct gfs2_glock in fs/gfs2/incore.h)
has two main (internal) locks:

 1. A spinlock (gl_spin) which protects the internal state such
    as gl_state, gl_target and the list of holders (gl_holders)
 2. A non-blocking bit lock, GLF_LOCK, which is used to prevent other
    threads from making calls to the DLM, etc. at the same time. If a
    thread takes this lock, it must then call run_queue (usually via the
    workqueue) when it releases it in order to ensure any pending tasks
    are completed.

The gl_holders list contains all the queued lock requests (not
just the holders) associated with the glock. If there are any
held locks, then they will be contiguous entries at the head
of the list. Locks are granted in strictly the order that they
are queued, except for those marked LM_FLAG_PRIORITY, which are
used only during recovery, and even then only for journal locks.

There are three lock states that users of the glock layer can request,
namely shared (SH), deferred (DF) and exclusive (EX). Those translate
to the following DLM lock modes:

Glock mode | DLM lock mode
------------------------------
    UN     | IV/NL  Unlocked (no DLM lock associated with glock) or NL
    SH     | PR     (Protected read)
    DF     | CW     (Concurrent write)
    EX     | EX     (Exclusive)

Thus DF is basically a shared mode which is incompatible with the "normal"
shared lock mode, SH. In GFS2 the DF mode is used exclusively for direct I/O
operations. The glocks are basically a lock plus some routines which deal
with cache management. The following rules apply for the cache:

Glock mode | Cache data | Cache Metadata | Dirty Data | Dirty Metadata
--------------------------------------------------------------------------
    UN     |     No     |       No       |     No     |       No
    SH     |    Yes     |      Yes       |     No     |       No
    DF     |     No     |      Yes       |     No     |       No
    EX     |    Yes     |      Yes       |    Yes     |      Yes

These rules are implemented using the various glock operations which
are defined for each type of glock. Not all types of glocks use
all the modes. Only inode glocks use the DF mode, for example.

Table of glock operations and per type constants:

Field            | Purpose
----------------------------------------------------------------------------
go_xmote_th      | Called before remote state change (e.g. to sync dirty data)
go_xmote_bh      | Called after remote state change (e.g. to refill cache)
go_inval         | Called if remote state change requires invalidating the cache
go_demote_ok     | Returns boolean value of whether it's ok to demote a glock
                 | (e.g. checks timeout, and that there is no cached data)
go_lock          | Called for the first local holder of a lock
go_unlock        | Called on the final local unlock of a lock
go_dump          | Called to print content of object for debugfs file, or on
                 | error to dump glock to the log.
go_type          | The type of the glock, LM_TYPE_.....
go_min_hold_time | The minimum hold time

The minimum hold time for each lock is the time after a remote lock
grant for which we ignore remote demote requests. This is in order to
prevent a situation where locks are being bounced around the cluster
from node to node with none of the nodes making any progress. This
tends to show up most with shared mmaped files which are being written
to by multiple nodes. By delaying the demotion in response to a
remote callback, the userspace program is given time to make
some progress before the pages are unmapped.

There is a plan to try and remove the go_lock and go_unlock callbacks
if possible, in order to try and speed up the fast path through the locking.
Also, eventually we hope to make the glock "EX" mode locally shared
such that any local locking will be done with the i_mutex as required
rather than via the glock.

Locking rules for glock operations:

Operation    | GLF_LOCK bit lock held | gl_spin spinlock held
-----------------------------------------------------------------
go_xmote_th  |       Yes              |       No
go_xmote_bh  |       Yes              |       No
go_inval     |       Yes              |       No
go_demote_ok |       Sometimes        |       Yes
go_lock      |       Yes              |       No
go_unlock    |       Yes              |       No
go_dump      |       Sometimes        |       Yes

N.B. Operations must not drop either the bit lock or the spinlock
if it's held on entry. go_dump and go_demote_ok must never block.
Note that go_dump will only be called if the glock's state
indicates that it is caching uptodate data.

Glock locking order within GFS2:

 1. i_mutex (if required)
 2. Rename glock (for rename only)
 3. Inode glock(s)
    (Parents before children, inodes at "same level" with same parent in
     lock number order)
 4. Rgrp glock(s) (for (de)allocation operations)
 5. Transaction glock (via gfs2_trans_begin) for non-read operations
 6. Page lock (always last, very important!)

There are two glocks per inode. One deals with access to the inode
itself (locking order as above), and the other, known as the iopen
glock, is used in conjunction with the i_nlink field in the inode to
determine the lifetime of the inode in question. Locking of inodes
is on a per-inode basis. Locking of rgrps is on a per-rgrp basis.
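The rules above surface to callers as a small holder-based API declared in
fs/gfs2/glock.h (parts of which appear in the hunks below). As a reading aid,
here is a minimal sketch of the enqueue/dequeue pattern a glock user follows;
it is illustrative only and not part of this merge:

	/* Illustrative only: take an inode's glock in shared mode, then
	 * release it.  Names match the API in fs/gfs2/glock.h. */
	static int read_under_glock(struct gfs2_inode *ip)
	{
		struct gfs2_holder gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (error)
			return error;	/* never granted; holder cleaned up */

		/* ... read cached data/metadata, permitted in SH per the
		   cache table above ... */

		gfs2_glock_dq_uninit(&gh);	/* drop hold, release holder */
		return 0;
	}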
fs/gfs2/Kconfig

@@ -14,23 +14,11 @@ config GFS2_FS
 	  GFS is perfect consistency -- changes made to the filesystem on one
 	  machine show up immediately on all other machines in the cluster.
 
-	  To use the GFS2 filesystem, you will need to enable one or more of
-	  the below locking modules. Documentation and utilities for GFS2 can
+	  To use the GFS2 filesystem in a cluster, you will need to enable
+	  the locking module below. Documentation and utilities for GFS2 can
 	  be found here: http://sources.redhat.com/cluster
 
-config GFS2_FS_LOCKING_NOLOCK
-	tristate "GFS2 \"nolock\" locking module"
-	depends on GFS2_FS
-	help
-	  Single node locking module for GFS2.
-
-	  Use this module if you want to use GFS2 on a single node without
-	  its clustering features. You can still take advantage of the
-	  large file support, and upgrade to running a full cluster later on
-	  if required.
-
-	  If you will only be using GFS2 in cluster mode, you do not need this
-	  module.
+	  The "nolock" lock module is now built in to GFS2 by default.
 
 config GFS2_FS_LOCKING_DLM
 	tristate "GFS2 DLM locking module"
fs/gfs2/Makefile

@@ -5,6 +5,5 @@ gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \
 	ops_fstype.o ops_inode.o ops_super.o quota.o \
 	recovery.o rgrp.o super.o sys.o trans.o util.o
 
-obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += locking/nolock/
 obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += locking/dlm/
 
fs/gfs2/gfs2.h

@@ -15,11 +15,6 @@ enum {
 	CREATE = 1,
 };
 
-enum {
-	NO_WAIT = 0,
-	WAIT = 1,
-};
-
 enum {
 	NO_FORCE = 0,
 	FORCE = 1,
fs/gfs2/glock.c | 1643 changed lines

File diff suppressed because it is too large.
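The bulk of "[GFS2] Clean up the glock core" lives in that suppressed file.
The GLF_LOCK discipline it implements is the one described in
gfs2-glocks.txt above; a hand-written sketch of the idea (not the suppressed
diff itself) looks like:

	/* Sketch of the GLF_LOCK bit-lock protocol: at most one thread runs
	 * the glock state machine, and whoever owns GLF_LOCK must re-run the
	 * queue on release so work queued meanwhile is never stranded. */
	static void run_queue_sketch(struct gfs2_glock *gl)
	{
		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
			return;		/* state machine already running */

		/* ... grant queued holders, issue DLM promote/demote ... */

		clear_bit(GLF_LOCK, &gl->gl_flags);
		smp_mb__after_clear_bit();
		/* per the documentation, pending tasks must now be
		   re-checked, usually by kicking the glock workqueue */
	}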
fs/gfs2/glock.h

@@ -26,11 +26,8 @@
 #define GL_SKIP			0x00000100
 #define GL_ATIME		0x00000200
 #define GL_NOCACHE		0x00000400
-#define GL_FLOCK		0x00000800
-#define GL_NOCANCEL		0x00001000
 
 #define GLR_TRYFAILED		13
-#define GLR_CANCELED		14
 
 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 {
@@ -41,6 +38,8 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
 	spin_lock(&gl->gl_spin);
 	pid = task_pid(current);
 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
+			break;
 		if (gh->gh_owner_pid == pid)
 			goto out;
 	}
@@ -70,7 +69,7 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
 {
 	int ret;
 	spin_lock(&gl->gl_spin);
-	ret = test_bit(GLF_DEMOTE, &gl->gl_flags) || !list_empty(&gl->gl_waiters3);
+	ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
 	spin_unlock(&gl->gl_spin);
 	return ret;
 }
@@ -98,6 +97,7 @@ int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
 
 /**
  * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
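The hunk breaks off inside that comment. For reference, the static inline it
documents is short; its body in this era of the code is essentially the
following (quoted from memory, so treat it as a sketch rather than part of
the diff):

	static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
					     unsigned int state, int flags,
					     struct gfs2_holder *gh)
	{
		int error;

		gfs2_holder_init(gl, state, flags, gh);	/* fill in holder */
		error = gfs2_glock_nq(gh);		/* enqueue; may block */
		if (error)
			gfs2_holder_uninit(gh);		/* undo on failure */

		return error;
	}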
@@ -130,10 +130,9 @@ int gfs2_lvb_hold(struct gfs2_glock *gl);
 void gfs2_lvb_unhold(struct gfs2_glock *gl);
 
 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
 
 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
 void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
-void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
+void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
 
 int __init gfs2_glock_init(void);
 void gfs2_glock_exit(void);
fs/gfs2/glops.c

@@ -13,6 +13,7 @@
 #include <linux/buffer_head.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/lm_interface.h>
+#include <linux/bio.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -171,26 +172,6 @@ static void inode_go_sync(struct gfs2_glock *gl)
 	}
 }
 
-/**
- * inode_go_xmote_bh - After promoting/demoting a glock
- * @gl: the glock
- *
- */
-
-static void inode_go_xmote_bh(struct gfs2_glock *gl)
-{
-	struct gfs2_holder *gh = gl->gl_req_gh;
-	struct buffer_head *bh;
-	int error;
-
-	if (gl->gl_state != LM_ST_UNLOCKED &&
-	    (!gh || !(gh->gh_flags & GL_SKIP))) {
-		error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
-		if (!error)
-			brelse(bh);
-	}
-}
-
 /**
  * inode_go_inval - prepare an inode glock to be released
  * @gl: the glock
@@ -266,6 +247,26 @@ static int inode_go_lock(struct gfs2_holder *gh)
 	return error;
 }
 
+/**
+ * inode_go_dump - print information about an inode
+ * @seq: The iterator
+ * @ip: the inode
+ *
+ * Returns: 0 on success, -ENOBUFS when we run out of space
+ */
+
+static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+{
+	const struct gfs2_inode *ip = gl->gl_object;
+	if (ip == NULL)
+		return 0;
+	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%08lx\n",
+		       (unsigned long long)ip->i_no_formal_ino,
+		       (unsigned long long)ip->i_no_addr,
+		       IF2DT(ip->i_inode.i_mode), ip->i_flags);
+	return 0;
+}
+
 /**
  * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
  * @gl: the glock
@@ -305,6 +306,22 @@ static void rgrp_go_unlock(struct gfs2_holder *gh)
 	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
 }
 
+/**
+ * rgrp_go_dump - print out an rgrp
+ * @seq: The iterator
+ * @gl: The glock in question
+ *
+ */
+
+static int rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+{
+	const struct gfs2_rgrpd *rgd = gl->gl_object;
+	if (rgd == NULL)
+		return 0;
+	gfs2_print_dbg(seq, " R: n:%llu\n", (unsigned long long)rgd->rd_addr);
+	return 0;
+}
+
 /**
  * trans_go_sync - promote/demote the transaction glock
  * @gl: the glock
@@ -330,7 +347,7 @@ static void trans_go_sync(struct gfs2_glock *gl)
 *
 */
 
-static void trans_go_xmote_bh(struct gfs2_glock *gl)
+static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
@@ -338,8 +355,7 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
 	struct gfs2_log_header_host head;
 	int error;
 
-	if (gl->gl_state != LM_ST_UNLOCKED &&
-	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
 		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 
 		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
@@ -354,6 +370,7 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
 			gfs2_log_pointers_init(sdp, head.lh_blkno);
 		}
 	}
+	return 0;
 }
 
 /**
@@ -375,12 +392,12 @@ const struct gfs2_glock_operations gfs2_meta_glops = {
 
 const struct gfs2_glock_operations gfs2_inode_glops = {
 	.go_xmote_th = inode_go_sync,
-	.go_xmote_bh = inode_go_xmote_bh,
 	.go_inval = inode_go_inval,
 	.go_demote_ok = inode_go_demote_ok,
 	.go_lock = inode_go_lock,
+	.go_dump = inode_go_dump,
 	.go_type = LM_TYPE_INODE,
-	.go_min_hold_time = HZ / 10,
+	.go_min_hold_time = HZ / 5,
 };
 
 const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -389,8 +406,9 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
 	.go_demote_ok = rgrp_go_demote_ok,
 	.go_lock = rgrp_go_lock,
 	.go_unlock = rgrp_go_unlock,
+	.go_dump = rgrp_go_dump,
 	.go_type = LM_TYPE_RGRP,
-	.go_min_hold_time = HZ / 10,
+	.go_min_hold_time = HZ / 5,
 };
 
 const struct gfs2_glock_operations gfs2_trans_glops = {
fs/gfs2/incore.h

@@ -77,7 +77,6 @@ struct gfs2_rgrp_host {
 struct gfs2_rgrpd {
 	struct list_head rd_list;	/* Link with superblock */
-	struct list_head rd_recent;	/* Recently used rgrps */
+	struct list_head rd_list_mru;
 	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
 	u64 rd_addr;			/* grp block disk address */
 	u64 rd_data0;			/* first data location */
@@ -128,20 +127,20 @@ struct gfs2_bufdata {
 
 struct gfs2_glock_operations {
 	void (*go_xmote_th) (struct gfs2_glock *gl);
-	void (*go_xmote_bh) (struct gfs2_glock *gl);
+	int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
 	void (*go_inval) (struct gfs2_glock *gl, int flags);
 	int (*go_demote_ok) (struct gfs2_glock *gl);
 	int (*go_lock) (struct gfs2_holder *gh);
 	void (*go_unlock) (struct gfs2_holder *gh);
+	int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
 	const int go_type;
 	const unsigned long go_min_hold_time;
 };
 
 enum {
 	/* States */
-	HIF_HOLDER	= 6,
+	HIF_HOLDER	= 6,  /* Set for gh that "holds" the glock */
 	HIF_FIRST	= 7,
-	HIF_ABORTED	= 9,
 	HIF_WAIT	= 10,
 };
 
@@ -154,20 +153,20 @@ struct gfs2_holder {
 	unsigned gh_flags;
 
 	int gh_error;
-	unsigned long gh_iflags;
+	unsigned long gh_iflags; /* HIF_... */
 	unsigned long gh_ip;
 };
 
 enum {
-	GLF_LOCK		= 1,
-	GLF_STICKY		= 2,
-	GLF_DEMOTE		= 3,
-	GLF_PENDING_DEMOTE	= 4,
-	GLF_DIRTY		= 5,
-	GLF_DEMOTE_IN_PROGRESS	= 6,
-	GLF_LFLUSH		= 7,
-	GLF_WAITERS2		= 8,
-	GLF_CONV_DEADLK		= 9,
+	GLF_LOCK			= 1,
+	GLF_STICKY			= 2,
+	GLF_DEMOTE			= 3,
+	GLF_PENDING_DEMOTE		= 4,
+	GLF_DEMOTE_IN_PROGRESS		= 5,
+	GLF_DIRTY			= 6,
+	GLF_LFLUSH			= 7,
+	GLF_INVALIDATE_IN_PROGRESS	= 8,
+	GLF_REPLY_PENDING		= 9,
 };
 
 struct gfs2_glock {
@@ -179,19 +178,14 @@ struct gfs2_glock {
 	spinlock_t gl_spin;
 
 	unsigned int gl_state;
+	unsigned int gl_target;
+	unsigned int gl_reply;
 	unsigned int gl_hash;
 	unsigned int gl_demote_state; /* state requested by remote node */
 	unsigned long gl_demote_time; /* time of first demote request */
-	struct pid *gl_owner_pid;
-	unsigned long gl_ip;
 	struct list_head gl_holders;
-	struct list_head gl_waiters1;	/* HIF_MUTEX */
-	struct list_head gl_waiters3;	/* HIF_PROMOTE */
 
 	const struct gfs2_glock_operations *gl_ops;
 
-	struct gfs2_holder *gl_req_gh;
-
-	void *gl_lock;
 	char *gl_lvb;
 	atomic_t gl_lvb_count;
@@ -427,7 +421,6 @@ struct gfs2_tune {
 	unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
 	unsigned int gt_atime_quantum; /* Min secs between atime updates */
 	unsigned int gt_new_files_jdata;
-	unsigned int gt_new_files_directio;
 	unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
 	unsigned int gt_stall_secs; /* Detects trouble! */
 	unsigned int gt_complain_secs;
@@ -534,7 +527,6 @@ struct gfs2_sbd {
 	struct mutex sd_rindex_mutex;
 	struct list_head sd_rindex_list;
-	struct list_head sd_rindex_recent_list;
+	struct list_head sd_rindex_mru_list;
 	struct gfs2_rgrpd *sd_rindex_forward;
 	unsigned int sd_rgrps;
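The "recent list" to "mru list" change above replaces a one-shot recency
list with a standard most-recently-used discipline on sd_rindex_mru_list.
A hedged sketch of the idea — the helper name and the guard lock
(sd_rindex_spin) are assumptions for illustration, not quoted from the diff:

	/* Illustrative MRU discipline on the rgrp list: bump an rgrp to the
	 * front when it is used, so allocation scans start from recently
	 * used groups and the tail naturally holds the coldest entries. */
	static void rgrp_mark_used(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
	{
		spin_lock(&sdp->sd_rindex_spin);	/* assumed guard lock */
		list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
		spin_unlock(&sdp->sd_rindex_spin);
	}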
fs/gfs2/inode.c

@@ -504,7 +504,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
 	}
 
 	if (!is_root) {
-		error = permission(dir, MAY_EXEC, NULL);
+		error = gfs2_permission(dir, MAY_EXEC);
 		if (error)
 			goto out;
 	}
@@ -667,7 +667,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
 {
 	int error;
 
-	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
+	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
 	if (error)
 		return error;
 
@@ -789,12 +789,7 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
 		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
 		    gfs2_tune_get(sdp, gt_new_files_jdata))
 			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
-		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
-		    gfs2_tune_get(sdp, gt_new_files_directio))
-			di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
 	} else if (S_ISDIR(mode)) {
-		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
-					    GFS2_DIF_INHERIT_DIRECTIO);
 		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
 					    GFS2_DIF_INHERIT_JDATA);
 	}
@@ -1134,7 +1129,7 @@ int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
 	if (IS_APPEND(&dip->i_inode))
 		return -EPERM;
 
-	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
+	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
 	if (error)
 		return error;
 
fs/gfs2/inode.h

@@ -72,7 +72,6 @@ static inline void gfs2_inum_out(const struct gfs2_inode *ip,
 }
 
 
-void gfs2_inode_attr_in(struct gfs2_inode *ip);
 void gfs2_set_iop(struct inode *inode);
 struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
 				u64 no_addr, u64 no_formal_ino,
@@ -91,6 +90,7 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
 		struct gfs2_inode *ip);
 int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
 		   const struct gfs2_inode *ip);
+int gfs2_permission(struct inode *inode, int mask);
 int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to);
 int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len);
 int gfs2_glock_nq_atime(struct gfs2_holder *gh);
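The body of gfs2_permission itself does not appear in this view. From the
"[GFS2] don't call permission()" change, its shape is roughly the following —
a sketch, not the diff; the ACL callback and exact flag are recalled from
the code of this era, not quoted:

	int gfs2_permission(struct inode *inode, int mask)
	{
		struct gfs2_inode *ip = GFS2_I(inode);
		struct gfs2_holder i_gh;
		int unlock = 0;
		int error;

		/* Take the inode glock in SH unless this task already
		   holds it (see gfs2_glock_is_locked_by_me above). */
		if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
						   LM_FLAG_ANY, &i_gh);
			if (error)
				return error;
			unlock = 1;
		}

		error = generic_permission(inode, mask, gfs2_check_acl);

		if (unlock)
			gfs2_glock_dq_uninit(&i_gh);

		return error;
	}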
fs/gfs2/locking.c

@@ -23,12 +23,54 @@ struct lmh_wrapper {
 	const struct lm_lockops *lw_ops;
 };
 
+static int nolock_mount(char *table_name, char *host_data,
+			lm_callback_t cb, void *cb_data,
+			unsigned int min_lvb_size, int flags,
+			struct lm_lockstruct *lockstruct,
+			struct kobject *fskobj);
+
 /* List of registered low-level locking protocols.  A file system selects one
    of them by name at mount time, e.g. lock_nolock, lock_dlm. */
 
+static const struct lm_lockops nolock_ops = {
+	.lm_proto_name = "lock_nolock",
+	.lm_mount = nolock_mount,
+};
+
+static struct lmh_wrapper nolock_proto = {
+	.lw_list = LIST_HEAD_INIT(nolock_proto.lw_list),
+	.lw_ops = &nolock_ops,
+};
+
 static LIST_HEAD(lmh_list);
 static DEFINE_MUTEX(lmh_lock);
 
+static int nolock_mount(char *table_name, char *host_data,
+			lm_callback_t cb, void *cb_data,
+			unsigned int min_lvb_size, int flags,
+			struct lm_lockstruct *lockstruct,
+			struct kobject *fskobj)
+{
+	char *c;
+	unsigned int jid;
+
+	c = strstr(host_data, "jid=");
+	if (!c)
+		jid = 0;
+	else {
+		c += 4;
+		sscanf(c, "%u", &jid);
+	}
+
+	lockstruct->ls_jid = jid;
+	lockstruct->ls_first = 1;
+	lockstruct->ls_lvb_size = min_lvb_size;
+	lockstruct->ls_ops = &nolock_ops;
+	lockstruct->ls_flags = LM_LSFLAG_LOCAL;
+
+	return 0;
+}
+
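With the nolock protocol built in, a single-node mount resolves through the
same lookup as any other protocol. A hypothetical caller sketch — the first
three parameters match the gfs2_mount_lockproto declaration visible in the
hunk header below; the trailing arguments follow the lm_lockops mount
interface and are assumptions, as are the argument values:

	struct lm_lockstruct ls;
	int error;

	error = gfs2_mount_lockproto("lock_nolock", "mydisk", "jid=0",
				     gfs2_glock_cb, sdp,   /* callback + data */
				     32, 0,                /* min LVB size, flags */
				     &ls, &sdp->sd_kobj);  /* result + sysfs parent */
	if (!error)
		printk(KERN_INFO "GFS2: using %s\n", ls.ls_ops->lm_proto_name);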
 /**
  * gfs2_register_lockproto - Register a low-level locking protocol
  * @proto: the protocol definition
@@ -116,9 +158,13 @@ int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data,
 	int try = 0;
 	int error, found;
 
+
 retry:
 	mutex_lock(&lmh_lock);
 
+	if (list_empty(&nolock_proto.lw_list))
+		list_add(&nolock_proto.lw_list, &lmh_list);
+
 	found = 0;
 	list_for_each_entry(lw, &lmh_list, lw_list) {
 		if (!strcmp(lw->lw_ops->lm_proto_name, proto_name)) {
@@ -139,7 +185,8 @@ int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data,
 		goto out;
 	}
 
-	if (!try_module_get(lw->lw_ops->lm_owner)) {
+	if (lw->lw_ops->lm_owner &&
+	    !try_module_get(lw->lw_ops->lm_owner)) {
 		try = 0;
 		mutex_unlock(&lmh_lock);
 		msleep(1000);
@@ -158,7 +205,8 @@ int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data,
 void gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct)
 {
 	mutex_lock(&lmh_lock);
-	lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace);
+	if (lockstruct->ls_ops->lm_unmount)
+		lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace);
 	if (lockstruct->ls_ops->lm_owner)
 		module_put(lockstruct->ls_ops->lm_owner);
 	mutex_unlock(&lmh_lock);
fs/gfs2/locking/dlm/lock.c

@@ -11,27 +11,287 @@
 
 static char junk_lvb[GDLM_LVB_SIZE];
 
-static void queue_complete(struct gdlm_lock *lp)
+
+/* convert dlm lock-mode to gfs lock-state */
+
+static s16 gdlm_make_lmstate(s16 dlmmode)
+{
+	switch (dlmmode) {
+	case DLM_LOCK_IV:
+	case DLM_LOCK_NL:
+		return LM_ST_UNLOCKED;
+	case DLM_LOCK_EX:
+		return LM_ST_EXCLUSIVE;
+	case DLM_LOCK_CW:
+		return LM_ST_DEFERRED;
+	case DLM_LOCK_PR:
+		return LM_ST_SHARED;
+	}
+	gdlm_assert(0, "unknown DLM mode %d", dlmmode);
+	return -1;
+}
+
+/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
+   thread gets to it. */
+
+static void queue_submit(struct gdlm_lock *lp)
 {
 	struct gdlm_ls *ls = lp->ls;
 
+	clear_bit(LFL_ACTIVE, &lp->flags);
+
 	spin_lock(&ls->async_lock);
-	list_add_tail(&lp->clist, &ls->complete);
+	list_add_tail(&lp->delay_list, &ls->submit);
 	spin_unlock(&ls->async_lock);
 	wake_up(&ls->thread_wait);
 }
 
-static inline void gdlm_ast(void *astarg)
+static void wake_up_ast(struct gdlm_lock *lp)
 {
-	queue_complete(astarg);
+	clear_bit(LFL_AST_WAIT, &lp->flags);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&lp->flags, LFL_AST_WAIT);
 }
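wake_up_ast() is the wake half of a bit-wait handshake on LFL_AST_WAIT. The
sleep half, used elsewhere in lock_dlm when a synchronous AST is needed,
pairs with it roughly as in this sketch — the helper names here are
assumptions, not lines from the diff:

	/* Sketch of the matching sleeper: block until gdlm_ast()'s
	 * completion path calls wake_up_ast() and clears the bit. */
	static int ast_wait(void *word)
	{
		schedule();	/* standard wait_on_bit action: just sleep */
		return 0;
	}

	static void wait_for_ast(struct gdlm_lock *lp)
	{
		wait_on_bit(&lp->flags, LFL_AST_WAIT, ast_wait,
			    TASK_UNINTERRUPTIBLE);
	}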
 
-static inline void gdlm_bast(void *astarg, int mode)
+static void gdlm_delete_lp(struct gdlm_lock *lp)
 {
+	struct gdlm_ls *ls = lp->ls;
+
+	spin_lock(&ls->async_lock);
+	if (!list_empty(&lp->delay_list))
+		list_del_init(&lp->delay_list);
+	ls->all_locks_count--;
+	spin_unlock(&ls->async_lock);
+
+	kfree(lp);
+}
+
+static void gdlm_queue_delayed(struct gdlm_lock *lp)
+{
+	struct gdlm_ls *ls = lp->ls;
+
+	spin_lock(&ls->async_lock);
+	list_add_tail(&lp->delay_list, &ls->delayed);
+	spin_unlock(&ls->async_lock);
+}
+
+static void process_complete(struct gdlm_lock *lp)
+{
+	struct gdlm_ls *ls = lp->ls;
+	struct lm_async_cb acb;
+
+	memset(&acb, 0, sizeof(acb));
+
+	if (lp->lksb.sb_status == -DLM_ECANCEL) {
+		log_info("complete dlm cancel %x,%llx flags %lx",
+			 lp->lockname.ln_type,
+			 (unsigned long long)lp->lockname.ln_number,
+			 lp->flags);
+
+		lp->req = lp->cur;
+		acb.lc_ret |= LM_OUT_CANCELED;
+		if (lp->cur == DLM_LOCK_IV)
+			lp->lksb.sb_lkid = 0;
+		goto out;
+	}
+
+	if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
+		if (lp->lksb.sb_status != -DLM_EUNLOCK) {
+			log_info("unlock sb_status %d %x,%llx flags %lx",
+				 lp->lksb.sb_status, lp->lockname.ln_type,
+				 (unsigned long long)lp->lockname.ln_number,
+				 lp->flags);
+			return;
+		}
+
+		lp->cur = DLM_LOCK_IV;
+		lp->req = DLM_LOCK_IV;
+		lp->lksb.sb_lkid = 0;
+
+		if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
+			gdlm_delete_lp(lp);
+			return;
+		}
+		goto out;
+	}
+
+	if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
+		memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
+
+	if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
+		if (lp->req == DLM_LOCK_PR)
+			lp->req = DLM_LOCK_CW;
+		else if (lp->req == DLM_LOCK_CW)
+			lp->req = DLM_LOCK_PR;
+	}
+
+	/*
+	 * A canceled lock request.  The lock was just taken off the delayed
+	 * list and was never even submitted to dlm.
+	 */
+
+	if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
+		log_info("complete internal cancel %x,%llx",
+			 lp->lockname.ln_type,
+			 (unsigned long long)lp->lockname.ln_number);
+		lp->req = lp->cur;
+		acb.lc_ret |= LM_OUT_CANCELED;
+		goto out;
+	}
+
+	/*
+	 * An error occurred.
+	 */
+
+	if (lp->lksb.sb_status) {
+		/* a "normal" error */
+		if ((lp->lksb.sb_status == -EAGAIN) &&
+		    (lp->lkf & DLM_LKF_NOQUEUE)) {
+			lp->req = lp->cur;
+			if (lp->cur == DLM_LOCK_IV)
+				lp->lksb.sb_lkid = 0;
+			goto out;
+		}
+
+		/* this could only happen with cancels I think */
+		log_info("ast sb_status %d %x,%llx flags %lx",
+			 lp->lksb.sb_status, lp->lockname.ln_type,
+			 (unsigned long long)lp->lockname.ln_number,
+			 lp->flags);
+		return;
+	}
+
+	/*
+	 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
+	 */
+
+	if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
+		wake_up_ast(lp);
+		return;
+	}
+
+	/*
+	 * A lock has been demoted to NL because it initially completed during
+	 * BLOCK_LOCKS.  Now it must be requested in the originally requested
+	 * mode.
+	 */
+
+	if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
+		gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
+			    lp->lockname.ln_type,
+			    (unsigned long long)lp->lockname.ln_number);
+		gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
+			    lp->lockname.ln_type,
+			    (unsigned long long)lp->lockname.ln_number);
+
+		lp->cur = DLM_LOCK_NL;
+		lp->req = lp->prev_req;
+		lp->prev_req = DLM_LOCK_IV;
+		lp->lkf &= ~DLM_LKF_CONVDEADLK;
+
+		set_bit(LFL_NOCACHE, &lp->flags);
+
+		if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
+		    !test_bit(LFL_NOBLOCK, &lp->flags))
+			gdlm_queue_delayed(lp);
+		else
+			queue_submit(lp);
+		return;
+	}
+
+	/*
+	 * A request is granted during dlm recovery.  It may be granted
+	 * because the locks of a failed node were cleared.  In that case,
+	 * there may be inconsistent data beneath this lock and we must wait
+	 * for recovery to complete to use it.  When gfs recovery is done this
+	 * granted lock will be converted to NL and then reacquired in this
+	 * granted state.
+	 */
+
+	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
+	    !test_bit(LFL_NOBLOCK, &lp->flags) &&
+	    lp->req != DLM_LOCK_NL) {
+
+		lp->cur = lp->req;
+		lp->prev_req = lp->req;
+		lp->req = DLM_LOCK_NL;
+		lp->lkf |= DLM_LKF_CONVERT;
+		lp->lkf &= ~DLM_LKF_CONVDEADLK;
+
+		log_debug("rereq %x,%llx id %x %d,%d",
+			  lp->lockname.ln_type,
+			  (unsigned long long)lp->lockname.ln_number,
+			  lp->lksb.sb_lkid, lp->cur, lp->req);
+
+		set_bit(LFL_REREQUEST, &lp->flags);
+		queue_submit(lp);
+		return;
+	}
+
+	/*
+	 * DLM demoted the lock to NL before it was granted so GFS must be
+	 * told it cannot cache data for this lock.
+	 */
+
+	if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
+		set_bit(LFL_NOCACHE, &lp->flags);
+
+out:
+	/*
+	 * This is an internal lock_dlm lock
+	 */
+
+	if (test_bit(LFL_INLOCK, &lp->flags)) {
+		clear_bit(LFL_NOBLOCK, &lp->flags);
+		lp->cur = lp->req;
+		wake_up_ast(lp);
+		return;
+	}
+
+	/*
+	 * Normal completion of a lock request.  Tell GFS it now has the lock.
+	 */
+
+	clear_bit(LFL_NOBLOCK, &lp->flags);
+	lp->cur = lp->req;
+
+	acb.lc_name = lp->lockname;
+	acb.lc_ret |= gdlm_make_lmstate(lp->cur);
+
+	ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
+}
+
+static void gdlm_ast(void *astarg)
+{
+	struct gdlm_lock *lp = astarg;
+	clear_bit(LFL_ACTIVE, &lp->flags);
+	process_complete(lp);
+}
+
+static void process_blocking(struct gdlm_lock *lp, int bast_mode)
+{
+	struct gdlm_ls *ls = lp->ls;
+	unsigned int cb = 0;
+
+	switch (gdlm_make_lmstate(bast_mode)) {
+	case LM_ST_EXCLUSIVE:
+		cb = LM_CB_NEED_E;
+		break;
+	case LM_ST_DEFERRED:
+		cb = LM_CB_NEED_D;
+		break;
+	case LM_ST_SHARED:
+		cb = LM_CB_NEED_S;
+		break;
+	default:
+		gdlm_assert(0, "unknown bast mode %u", bast_mode);
+	}
+
+	ls->fscb(ls->sdp, cb, &lp->lockname);
+}
+
+
+static void gdlm_bast(void *astarg, int mode)
 {
 	struct gdlm_lock *lp = astarg;
 
 	if (!mode) {
 		printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
@@ -40,23 +300,7 @@ static inline void gdlm_bast(void *astarg, int mode)
 		return;
 	}
 
-	spin_lock(&ls->async_lock);
-	if (!lp->bast_mode) {
-		list_add_tail(&lp->blist, &ls->blocking);
-		lp->bast_mode = mode;
-	} else if (lp->bast_mode < mode)
-		lp->bast_mode = mode;
-	spin_unlock(&ls->async_lock);
-	wake_up(&ls->thread_wait);
-}
-
-void gdlm_queue_delayed(struct gdlm_lock *lp)
-{
-	struct gdlm_ls *ls = lp->ls;
-
-	spin_lock(&ls->async_lock);
-	list_add_tail(&lp->delay_list, &ls->delayed);
-	spin_unlock(&ls->async_lock);
+	process_blocking(lp, mode);
 }
 
 /* convert gfs lock-state to dlm lock-mode */
@@ -77,24 +321,6 @@ static s16 make_mode(s16 lmstate)
 	return -1;
 }
 
-/* convert dlm lock-mode to gfs lock-state */
-
-s16 gdlm_make_lmstate(s16 dlmmode)
-{
-	switch (dlmmode) {
-	case DLM_LOCK_IV:
-	case DLM_LOCK_NL:
-		return LM_ST_UNLOCKED;
-	case DLM_LOCK_EX:
-		return LM_ST_EXCLUSIVE;
-	case DLM_LOCK_CW:
-		return LM_ST_DEFERRED;
-	case DLM_LOCK_PR:
-		return LM_ST_SHARED;
-	}
-	gdlm_assert(0, "unknown DLM mode %d", dlmmode);
-	return -1;
-}
-
 /* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
    DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
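That comment introduces a consistency check between GFS2's and the DLM's
notion of the current mode; the helper it describes is cut off in this view.
Its presumed shape — a hypothetical reconstruction, relying on make_mode()
above and the NL/IV-both-unlocked rule — is:

	static inline void check_cur_state(struct gdlm_lock *lp,
					   unsigned int cur_state)
	{
		s16 cur = make_mode(cur_state);
		/* IV means "no DLM lock yet", which GFS also calls unlocked */
		if (lp->cur != DLM_LOCK_IV)
			gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
	}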
@@ -134,14 +360,6 @@ static inline unsigned int make_flags(struct gdlm_lock *lp,
 
 	if (lp->lksb.sb_lkid != 0) {
 		lkf |= DLM_LKF_CONVERT;
-
-		/* Conversion deadlock avoidance by DLM */
-
-		if (!(lp->ls->fsflags & LM_MFLAG_CONV_NODROP) &&
-		    !test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
-		    !(lkf & DLM_LKF_NOQUEUE) &&
-		    cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
-			lkf |= DLM_LKF_CONVDEADLK;
 	}
 
 	if (lp->lvb)
@@ -173,14 +391,9 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
 	make_strname(name, &lp->strname);
 	lp->ls = ls;
 	lp->cur = DLM_LOCK_IV;
-	lp->lvb = NULL;
-	lp->hold_null = NULL;
-	INIT_LIST_HEAD(&lp->clist);
-	INIT_LIST_HEAD(&lp->blist);
 	INIT_LIST_HEAD(&lp->delay_list);
 
 	spin_lock(&ls->async_lock);
-	list_add(&lp->all_list, &ls->all_locks);
 	ls->all_locks_count++;
 	spin_unlock(&ls->async_lock);
 
@@ -188,26 +401,6 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
 	return 0;
 }
 
-void gdlm_delete_lp(struct gdlm_lock *lp)
-{
-	struct gdlm_ls *ls = lp->ls;
-
-	spin_lock(&ls->async_lock);
-	if (!list_empty(&lp->clist))
-		list_del_init(&lp->clist);
-	if (!list_empty(&lp->blist))
-		list_del_init(&lp->blist);
-	if (!list_empty(&lp->delay_list))
-		list_del_init(&lp->delay_list);
-	gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
-		    (unsigned long long)lp->lockname.ln_number);
-	list_del_init(&lp->all_list);
-	ls->all_locks_count--;
-	spin_unlock(&ls->async_lock);
-
-	kfree(lp);
-}
-
 int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
 		  void **lockp)
 {
@@ -261,7 +454,7 @@ unsigned int gdlm_do_lock(struct gdlm_lock *lp)
 
 	if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
 		lp->lksb.sb_status = -EAGAIN;
-		queue_complete(lp);
+		gdlm_ast(lp);
 		error = 0;
 	}
 
@@ -308,6 +501,12 @@ unsigned int gdlm_lock(void *lock, unsigned int cur_state,
 {
 	struct gdlm_lock *lp = lock;
 
+	if (req_state == LM_ST_UNLOCKED)
+		return gdlm_unlock(lock, cur_state);
+
+	if (req_state == LM_ST_UNLOCKED)
+		return gdlm_unlock(lock, cur_state);
+
 	clear_bit(LFL_DLM_CANCEL, &lp->flags);
 	if (flags & LM_FLAG_NOEXP)
 		set_bit(LFL_NOBLOCK, &lp->flags);
@@ -351,7 +550,7 @@ void gdlm_cancel(void *lock)
 	if (delay_list) {
 		set_bit(LFL_CANCEL, &lp->flags);
 		set_bit(LFL_ACTIVE, &lp->flags);
-		queue_complete(lp);
+		gdlm_ast(lp);
 		return;
 	}
 
@@ -507,22 +706,3 @@ void gdlm_submit_delayed(struct gdlm_ls *ls)
 	wake_up(&ls->thread_wait);
 }
 
-int gdlm_release_all_locks(struct gdlm_ls *ls)
-{
-	struct gdlm_lock *lp, *safe;
-	int count = 0;
-
-	spin_lock(&ls->async_lock);
-	list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
-		list_del_init(&lp->all_list);
-
-		if (lp->lvb && lp->lvb != junk_lvb)
-			kfree(lp->lvb);
-		kfree(lp);
-		count++;
-	}
-	spin_unlock(&ls->async_lock);
-
-	return count;
-}
-
fs/gfs2/locking/dlm/lock_dlm.h

@@ -72,19 +72,12 @@ struct gdlm_ls {
 	int			recover_jid_done;
 	int			recover_jid_status;
 	spinlock_t		async_lock;
-	struct list_head	complete;
-	struct list_head	blocking;
 	struct list_head	delayed;
 	struct list_head	submit;
-	struct list_head	all_locks;
 	u32			all_locks_count;
 	wait_queue_head_t	wait_control;
-	struct task_struct	*thread1;
-	struct task_struct	*thread2;
+	struct task_struct	*thread;
 	wait_queue_head_t	thread_wait;
-	unsigned long		drop_time;
-	int			drop_locks_count;
-	int			drop_locks_period;
 };
 
 enum {
@@ -117,12 +110,7 @@ struct gdlm_lock {
 	u32			lkf;		/* dlm flags DLM_LKF_ */
 	unsigned long		flags;		/* lock_dlm flags LFL_ */
 
-	int			bast_mode;	/* protected by async_lock */
-
-	struct list_head	clist;		/* complete */
-	struct list_head	blist;		/* blocking */
 	struct list_head	delay_list;	/* delayed */
-	struct list_head	all_list;	/* all locks for the fs */
 	struct gdlm_lock	*hold_null;	/* NL lock for hold_lvb */
 };
 
@@ -159,11 +147,7 @@ void gdlm_release_threads(struct gdlm_ls *);
 
 /* lock.c */
 
-s16 gdlm_make_lmstate(s16);
-void gdlm_queue_delayed(struct gdlm_lock *);
 void gdlm_submit_delayed(struct gdlm_ls *);
-int gdlm_release_all_locks(struct gdlm_ls *);
-void gdlm_delete_lp(struct gdlm_lock *);
 unsigned int gdlm_do_lock(struct gdlm_lock *);
 
 int gdlm_get_lock(void *, struct lm_lockname *, void **);
fs/gfs2/locking/dlm/mount.c

@@ -22,22 +22,14 @@ static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
 	if (!ls)
 		return NULL;
 
-	ls->drop_locks_count = GDLM_DROP_COUNT;
-	ls->drop_locks_period = GDLM_DROP_PERIOD;
 	ls->fscb = cb;
 	ls->sdp = sdp;
 	ls->fsflags = flags;
 	spin_lock_init(&ls->async_lock);
-	INIT_LIST_HEAD(&ls->complete);
-	INIT_LIST_HEAD(&ls->blocking);
 	INIT_LIST_HEAD(&ls->delayed);
 	INIT_LIST_HEAD(&ls->submit);
-	INIT_LIST_HEAD(&ls->all_locks);
 	init_waitqueue_head(&ls->thread_wait);
 	init_waitqueue_head(&ls->wait_control);
-	ls->thread1 = NULL;
-	ls->thread2 = NULL;
-	ls->drop_time = jiffies;
+	ls->thread = NULL;
 	ls->jid = -1;
 
 	strncpy(buf, table_name, 256);
@@ -180,7 +172,6 @@ static int gdlm_mount(char *table_name, char *host_data,
 static void gdlm_unmount(void *lockspace)
 {
 	struct gdlm_ls *ls = lockspace;
-	int rv;
 
 	log_debug("unmount flags %lx", ls->flags);
 
@@ -194,9 +185,7 @@ static void gdlm_unmount(void *lockspace)
 	gdlm_kobject_release(ls);
 	dlm_release_lockspace(ls->dlm_lockspace, 2);
 	gdlm_release_threads(ls);
-	rv = gdlm_release_all_locks(ls);
-	if (rv)
-		log_info("gdlm_unmount: %d stray locks freed", rv);
+	BUG_ON(ls->all_locks_count);
 out:
 	kfree(ls);
 }
@@ -232,7 +221,6 @@ static void gdlm_withdraw(void *lockspace)
 
 	dlm_release_lockspace(ls->dlm_lockspace, 2);
 	gdlm_release_threads(ls);
-	gdlm_release_all_locks(ls);
 	gdlm_kobject_release(ls);
 }
 
fs/gfs2/locking/dlm/sysfs.c

@@ -114,17 +114,6 @@ static ssize_t recover_status_show(struct gdlm_ls *ls, char *buf)
 	return sprintf(buf, "%d\n", ls->recover_jid_status);
 }
 
-static ssize_t drop_count_show(struct gdlm_ls *ls, char *buf)
-{
-	return sprintf(buf, "%d\n", ls->drop_locks_count);
-}
-
-static ssize_t drop_count_store(struct gdlm_ls *ls, const char *buf, size_t len)
-{
-	ls->drop_locks_count = simple_strtol(buf, NULL, 0);
-	return len;
-}
-
 struct gdlm_attr {
 	struct attribute attr;
 	ssize_t (*show)(struct gdlm_ls *, char *);
@@ -144,7 +133,6 @@ GDLM_ATTR(first_done, 0444, first_done_show, NULL);
 GDLM_ATTR(recover,        0644, recover_show,        recover_store);
 GDLM_ATTR(recover_done,   0444, recover_done_show,   NULL);
 GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);
-GDLM_ATTR(drop_count,     0644, drop_count_show,     drop_count_store);
 
 static struct attribute *gdlm_attrs[] = {
 	&gdlm_attr_proto_name.attr,
@@ -157,7 +145,6 @@ static struct attribute *gdlm_attrs[] = {
 	&gdlm_attr_recover.attr,
 	&gdlm_attr_recover_done.attr,
 	&gdlm_attr_recover_status.attr,
-	&gdlm_attr_drop_count.attr,
 	NULL,
 };
 
fs/gfs2/locking/dlm/thread.c

@@ -9,367 +9,60 @@
 
 #include "lock_dlm.h"
 
-/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
-   thread gets to it. */
-
-static void queue_submit(struct gdlm_lock *lp)
-{
-	struct gdlm_ls *ls = lp->ls;
-
-	spin_lock(&ls->async_lock);
-	list_add_tail(&lp->delay_list, &ls->submit);
-	spin_unlock(&ls->async_lock);
-	wake_up(&ls->thread_wait);
-}
-
-static void process_blocking(struct gdlm_lock *lp, int bast_mode)
-{
-	struct gdlm_ls *ls = lp->ls;
-	unsigned int cb = 0;
-
-	switch (gdlm_make_lmstate(bast_mode)) {
-	case LM_ST_EXCLUSIVE:
-		cb = LM_CB_NEED_E;
-		break;
-	case LM_ST_DEFERRED:
-		cb = LM_CB_NEED_D;
-		break;
-	case LM_ST_SHARED:
-		cb = LM_CB_NEED_S;
-		break;
-	default:
-		gdlm_assert(0, "unknown bast mode %u", lp->bast_mode);
-	}
-
-	ls->fscb(ls->sdp, cb, &lp->lockname);
-}
-
-static void wake_up_ast(struct gdlm_lock *lp)
-{
-	clear_bit(LFL_AST_WAIT, &lp->flags);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&lp->flags, LFL_AST_WAIT);
-}
-
-static void process_complete(struct gdlm_lock *lp)
-{
-	struct gdlm_ls *ls = lp->ls;
-	struct lm_async_cb acb;
-	s16 prev_mode = lp->cur;
-
-	memset(&acb, 0, sizeof(acb));
-
-	if (lp->lksb.sb_status == -DLM_ECANCEL) {
-		log_info("complete dlm cancel %x,%llx flags %lx",
-			 lp->lockname.ln_type,
-			 (unsigned long long)lp->lockname.ln_number,
-			 lp->flags);
-
-		lp->req = lp->cur;
-		acb.lc_ret |= LM_OUT_CANCELED;
-		if (lp->cur == DLM_LOCK_IV)
-			lp->lksb.sb_lkid = 0;
-		goto out;
-	}
-
-	if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
-		if (lp->lksb.sb_status != -DLM_EUNLOCK) {
-			log_info("unlock sb_status %d %x,%llx flags %lx",
-				 lp->lksb.sb_status, lp->lockname.ln_type,
-				 (unsigned long long)lp->lockname.ln_number,
-				 lp->flags);
-			return;
-		}
-
-		lp->cur = DLM_LOCK_IV;
-		lp->req = DLM_LOCK_IV;
-		lp->lksb.sb_lkid = 0;
-
-		if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
-			gdlm_delete_lp(lp);
-			return;
-		}
-		goto out;
-	}
-
-	if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
-		memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
-
-	if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
-		if (lp->req == DLM_LOCK_PR)
-			lp->req = DLM_LOCK_CW;
-		else if (lp->req == DLM_LOCK_CW)
-			lp->req = DLM_LOCK_PR;
-	}
-
-	/*
-	 * A canceled lock request.  The lock was just taken off the delayed
-	 * list and was never even submitted to dlm.
-	 */
-
-	if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
-		log_info("complete internal cancel %x,%llx",
-			 lp->lockname.ln_type,
-			 (unsigned long long)lp->lockname.ln_number);
-		lp->req = lp->cur;
-		acb.lc_ret |= LM_OUT_CANCELED;
-		goto out;
-	}
-
-	/*
-	 * An error occured.
-	 */
-
-	if (lp->lksb.sb_status) {
-		/* a "normal" error */
-		if ((lp->lksb.sb_status == -EAGAIN) &&
-		    (lp->lkf & DLM_LKF_NOQUEUE)) {
-			lp->req = lp->cur;
-			if (lp->cur == DLM_LOCK_IV)
-				lp->lksb.sb_lkid = 0;
-			goto out;
-		}
-
-		/* this could only happen with cancels I think */
-		log_info("ast sb_status %d %x,%llx flags %lx",
-			 lp->lksb.sb_status, lp->lockname.ln_type,
-			 (unsigned long long)lp->lockname.ln_number,
-			 lp->flags);
-		if (lp->lksb.sb_status == -EDEADLOCK &&
-		    lp->ls->fsflags & LM_MFLAG_CONV_NODROP) {
-			lp->req = lp->cur;
-			acb.lc_ret |= LM_OUT_CONV_DEADLK;
-			if (lp->cur == DLM_LOCK_IV)
-				lp->lksb.sb_lkid = 0;
-			goto out;
-		} else
-			return;
-	}
-
-	/*
-	 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
-	 */
-
-	if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
-		wake_up_ast(lp);
-		return;
-	}
-
-	/*
-	 * A lock has been demoted to NL because it initially completed during
-	 * BLOCK_LOCKS.  Now it must be requested in the originally requested
-	 * mode.
-	 */
-
-	if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
-		gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
-			    lp->lockname.ln_type,
-			    (unsigned long long)lp->lockname.ln_number);
-		gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
-			    lp->lockname.ln_type,
-			    (unsigned long long)lp->lockname.ln_number);
-
-		lp->cur = DLM_LOCK_NL;
-		lp->req = lp->prev_req;
-		lp->prev_req = DLM_LOCK_IV;
-		lp->lkf &= ~DLM_LKF_CONVDEADLK;
-
-		set_bit(LFL_NOCACHE, &lp->flags);
-
-		if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
-		    !test_bit(LFL_NOBLOCK, &lp->flags))
-			gdlm_queue_delayed(lp);
-		else
-			queue_submit(lp);
-		return;
-	}
-
-	/*
-	 * A request is granted during dlm recovery.  It may be granted
-	 * because the locks of a failed node were cleared.  In that case,
-	 * there may be inconsistent data beneath this lock and we must wait
-	 * for recovery to complete to use it.  When gfs recovery is done this
-	 * granted lock will be converted to NL and then reacquired in this
-	 * granted state.
-	 */
-
-	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
-	    !test_bit(LFL_NOBLOCK, &lp->flags) &&
-	    lp->req != DLM_LOCK_NL) {
-
-		lp->cur = lp->req;
-		lp->prev_req = lp->req;
-		lp->req = DLM_LOCK_NL;
-		lp->lkf |= DLM_LKF_CONVERT;
-		lp->lkf &= ~DLM_LKF_CONVDEADLK;
-
-		log_debug("rereq %x,%llx id %x %d,%d",
-			  lp->lockname.ln_type,
-			  (unsigned long long)lp->lockname.ln_number,
-			  lp->lksb.sb_lkid, lp->cur, lp->req);
-
-		set_bit(LFL_REREQUEST, &lp->flags);
-		queue_submit(lp);
-		return;
-	}
-
-	/*
-	 * DLM demoted the lock to NL before it was granted so GFS must be
-	 * told it cannot cache data for this lock.
-	 */
-
-	if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
-		set_bit(LFL_NOCACHE, &lp->flags);
-
-out:
-	/*
-	 * This is an internal lock_dlm lock
-	 */
-
-	if (test_bit(LFL_INLOCK, &lp->flags)) {
-		clear_bit(LFL_NOBLOCK, &lp->flags);
-		lp->cur = lp->req;
-		wake_up_ast(lp);
-		return;
-	}
-
-	/*
-	 * Normal completion of a lock request.  Tell GFS it now has the lock.
-	 */
-
-	clear_bit(LFL_NOBLOCK, &lp->flags);
-	lp->cur = lp->req;
-
-	acb.lc_name = lp->lockname;
-	acb.lc_ret |= gdlm_make_lmstate(lp->cur);
-
-	if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
-	    (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
-		acb.lc_ret |= LM_OUT_CACHEABLE;
-
-	ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
-}
-
-static inline int no_work(struct gdlm_ls *ls, int blocking)
+static inline int no_work(struct gdlm_ls *ls)
 {
 	int ret;
 
 	spin_lock(&ls->async_lock);
-	ret = list_empty(&ls->complete) && list_empty(&ls->submit);
-	if (ret && blocking)
-		ret = list_empty(&ls->blocking);
+	ret = list_empty(&ls->submit);
 	spin_unlock(&ls->async_lock);
 
 	return ret;
 }
 
-static inline int check_drop(struct gdlm_ls *ls)
-{
-	if (!ls->drop_locks_count)
-		return 0;
-
-	if (time_after(jiffies, ls->drop_time + ls->drop_locks_period * HZ)) {
-		ls->drop_time = jiffies;
-		if (ls->all_locks_count >= ls->drop_locks_count)
-			return 1;
-	}
-	return 0;
-}
-
-static int gdlm_thread(void *data, int blist)
+static int gdlm_thread(void *data)
 {
 	struct gdlm_ls *ls = (struct gdlm_ls *) data;
 	struct gdlm_lock *lp = NULL;
-	uint8_t complete, blocking, submit, drop;
-
-	/* Only thread1 is allowed to do blocking callbacks since gfs
-	   may wait for a completion callback within a blocking cb. */
 
 	while (!kthread_should_stop()) {
 		wait_event_interruptible(ls->thread_wait,
-			!no_work(ls, blist) || kthread_should_stop());
-
-		complete = blocking = submit = drop = 0;
+				!no_work(ls) || kthread_should_stop());
 
 		spin_lock(&ls->async_lock);
 
-		if (blist && !list_empty(&ls->blocking)) {
-			lp = list_entry(ls->blocking.next, struct gdlm_lock,
-					blist);
-			list_del_init(&lp->blist);
-			blocking = lp->bast_mode;
-			lp->bast_mode = 0;
-		} else if (!list_empty(&ls->complete)) {
-			lp = list_entry(ls->complete.next, struct gdlm_lock,
-					clist);
-			list_del_init(&lp->clist);
-			complete = 1;
-		} else if (!list_empty(&ls->submit)) {
+		if (!list_empty(&ls->submit)) {
 			lp = list_entry(ls->submit.next, struct gdlm_lock,
 					delay_list);
 			list_del_init(&lp->delay_list);
-			submit = 1;
+			spin_unlock(&ls->async_lock);
+			gdlm_do_lock(lp);
+			spin_lock(&ls->async_lock);
 		}
-
-		drop = check_drop(ls);
 		spin_unlock(&ls->async_lock);
-
-		if (complete)
-			process_complete(lp);
-
-		else if (blocking)
-			process_blocking(lp, blocking);
-
-		else if (submit)
-			gdlm_do_lock(lp);
-
-		if (drop)
-			ls->fscb(ls->sdp, LM_CB_DROPLOCKS, NULL);
-
-		schedule();
 	}
 
 	return 0;
 }
 
-static int gdlm_thread1(void *data)
-{
-	return gdlm_thread(data, 1);
-}
-
-static int gdlm_thread2(void *data)
-{
-	return gdlm_thread(data, 0);
-}
-
 int gdlm_init_threads(struct gdlm_ls *ls)
 {
 	struct task_struct *p;
 	int error;
 
-	p = kthread_run(gdlm_thread1, ls, "lock_dlm1");
+	p = kthread_run(gdlm_thread, ls, "lock_dlm");
 	error = IS_ERR(p);
 	if (error) {
-		log_error("can't start lock_dlm1 thread %d", error);
+		log_error("can't start lock_dlm thread %d", error);
 		return error;
 	}
-	ls->thread1 = p;
-
-	p = kthread_run(gdlm_thread2, ls, "lock_dlm2");
-	error = IS_ERR(p);
-	if (error) {
-		log_error("can't start lock_dlm2 thread %d", error);
-		kthread_stop(ls->thread1);
-		return error;
-	}
-	ls->thread2 = p;
+	ls->thread = p;
 
 	return 0;
 }
 
 void gdlm_release_threads(struct gdlm_ls *ls)
 {
-	kthread_stop(ls->thread1);
-	kthread_stop(ls->thread2);
+	kthread_stop(ls->thread);
 }
fs/gfs2/locking/nolock/Makefile (entire file removed)

@@ -1,3 +0,0 @@
-obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += lock_nolock.o
-lock_nolock-y := main.o
-
@ -1,238 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
||||
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This copyrighted material is made available to anyone wishing to use,
|
||||
* modify, copy, or redistribute it subject to the terms and conditions
|
||||
 * of the GNU General Public License version 2.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/lm_interface.h>

struct nolock_lockspace {
	unsigned int nl_lvb_size;
};

static const struct lm_lockops nolock_ops;

static int nolock_mount(char *table_name, char *host_data,
			lm_callback_t cb, void *cb_data,
			unsigned int min_lvb_size, int flags,
			struct lm_lockstruct *lockstruct,
			struct kobject *fskobj)
{
	char *c;
	unsigned int jid;
	struct nolock_lockspace *nl;

	c = strstr(host_data, "jid=");
	if (!c)
		jid = 0;
	else {
		c += 4;
		sscanf(c, "%u", &jid);
	}

	nl = kzalloc(sizeof(struct nolock_lockspace), GFP_KERNEL);
	if (!nl)
		return -ENOMEM;

	nl->nl_lvb_size = min_lvb_size;

	lockstruct->ls_jid = jid;
	lockstruct->ls_first = 1;
	lockstruct->ls_lvb_size = min_lvb_size;
	lockstruct->ls_lockspace = nl;
	lockstruct->ls_ops = &nolock_ops;
	lockstruct->ls_flags = LM_LSFLAG_LOCAL;

	return 0;
}

static void nolock_others_may_mount(void *lockspace)
{
}

static void nolock_unmount(void *lockspace)
{
	struct nolock_lockspace *nl = lockspace;
	kfree(nl);
}

static void nolock_withdraw(void *lockspace)
{
}

/**
 * nolock_get_lock - get an lm_lock_t given a description of the lock
 * @lockspace: the lockspace the lock lives in
 * @name: the name of the lock
 * @lockp: return the lm_lock_t here
 *
 * Returns: 0 on success, -EXXX on failure
 */

static int nolock_get_lock(void *lockspace, struct lm_lockname *name,
			   void **lockp)
{
	*lockp = lockspace;
	return 0;
}

/**
 * nolock_put_lock - get rid of a lock structure
 * @lock: the lock to throw away
 *
 */

static void nolock_put_lock(void *lock)
{
}

/**
 * nolock_lock - acquire a lock
 * @lock: the lock to manipulate
 * @cur_state: the current state
 * @req_state: the requested state
 * @flags: modifier flags
 *
 * Returns: A bitmap of LM_OUT_*
 */

static unsigned int nolock_lock(void *lock, unsigned int cur_state,
				unsigned int req_state, unsigned int flags)
{
	/* single node: every request is grantable immediately */
	return req_state | LM_OUT_CACHEABLE;
}

/**
 * nolock_unlock - unlock a lock
 * @lock: the lock to manipulate
 * @cur_state: the current state
 *
 * Returns: 0
 */

static unsigned int nolock_unlock(void *lock, unsigned int cur_state)
{
	return 0;
}

static void nolock_cancel(void *lock)
{
}

/**
 * nolock_hold_lvb - hold on to a lock value block
 * @lock: the lock the LVB is associated with
 * @lvbp: return the lm_lvb_t here
 *
 * Returns: 0 on success, -EXXX on failure
 */

static int nolock_hold_lvb(void *lock, char **lvbp)
{
	struct nolock_lockspace *nl = lock;
	int error = 0;

	*lvbp = kzalloc(nl->nl_lvb_size, GFP_NOFS);
	if (!*lvbp)
		error = -ENOMEM;

	return error;
}

/**
 * nolock_unhold_lvb - release a LVB
 * @lock: the lock the LVB is associated with
 * @lvb: the lock value block
 *
 */

static void nolock_unhold_lvb(void *lock, char *lvb)
{
	kfree(lvb);
}

static int nolock_plock_get(void *lockspace, struct lm_lockname *name,
			    struct file *file, struct file_lock *fl)
{
	posix_test_lock(file, fl);

	return 0;
}

static int nolock_plock(void *lockspace, struct lm_lockname *name,
			struct file *file, int cmd, struct file_lock *fl)
{
	int error;
	error = posix_lock_file_wait(file, fl);
	return error;
}

static int nolock_punlock(void *lockspace, struct lm_lockname *name,
			  struct file *file, struct file_lock *fl)
{
	int error;
	/* fl->fl_type is F_UNLCK here, so this drops the lock */
	error = posix_lock_file_wait(file, fl);
	return error;
}

static void nolock_recovery_done(void *lockspace, unsigned int jid,
				 unsigned int message)
{
}

static const struct lm_lockops nolock_ops = {
	.lm_proto_name = "lock_nolock",
	.lm_mount = nolock_mount,
	.lm_others_may_mount = nolock_others_may_mount,
	.lm_unmount = nolock_unmount,
	.lm_withdraw = nolock_withdraw,
	.lm_get_lock = nolock_get_lock,
	.lm_put_lock = nolock_put_lock,
	.lm_lock = nolock_lock,
	.lm_unlock = nolock_unlock,
	.lm_cancel = nolock_cancel,
	.lm_hold_lvb = nolock_hold_lvb,
	.lm_unhold_lvb = nolock_unhold_lvb,
	.lm_plock_get = nolock_plock_get,
	.lm_plock = nolock_plock,
	.lm_punlock = nolock_punlock,
	.lm_recovery_done = nolock_recovery_done,
	.lm_owner = THIS_MODULE,
};

static int __init init_nolock(void)
{
	int error;

	error = gfs2_register_lockproto(&nolock_ops);
	if (error) {
		printk(KERN_WARNING
		       "lock_nolock: can't register protocol: %d\n", error);
		return error;
	}

	printk(KERN_INFO
	       "Lock_Nolock (built %s %s) installed\n", __DATE__, __TIME__);
	return 0;
}

static void __exit exit_nolock(void)
{
	gfs2_unregister_lockproto(&nolock_ops);
}

module_init(init_nolock);
module_exit(exit_nolock);

MODULE_DESCRIPTION("GFS Nolock Locking Module");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
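The nolock backend above is essentially an identity lock manager: nolock_lock() grants every request in the caller's context by echoing req_state back (plus LM_OUT_CACHEABLE), and an LVB is nothing more than a kzalloc'd buffer. To make the ops-table shape concrete, here is a userspace-only sketch; every name in it (lm_ops_model, demo_mount, demo_lock, ...) is invented for illustration and is not the kernel's lm_lockops API.

/* Illustrative userspace model of a pass-through lock manager. */
#include <stdio.h>
#include <string.h>

struct lm_ops_model {
	const char *proto_name;
	int (*mount)(const char *host_data, unsigned int *jid);
	unsigned int (*lock)(unsigned int cur_state, unsigned int req_state);
	unsigned int (*unlock)(unsigned int cur_state);
};

/* Parse "jid=N" the same way nolock_mount does, defaulting to 0. */
static int demo_mount(const char *host_data, unsigned int *jid)
{
	const char *c = strstr(host_data, "jid=");
	*jid = 0;
	if (c)
		sscanf(c + 4, "%u", jid);
	return 0;
}

/* A no-op lock manager grants every request immediately. */
static unsigned int demo_lock(unsigned int cur_state, unsigned int req_state)
{
	(void)cur_state;
	return req_state;	/* request == grant, no blocking possible */
}

static unsigned int demo_unlock(unsigned int cur_state)
{
	(void)cur_state;
	return 0;		/* always drops straight to unlocked */
}

static const struct lm_ops_model demo_ops = {
	.proto_name = "demo_nolock",
	.mount = demo_mount,
	.lock = demo_lock,
	.unlock = demo_unlock,
};

int main(void)
{
	unsigned int jid;
	demo_ops.mount("hostdata:jid=2", &jid);
	printf("%s: jid=%u, lock(0,3)=%u, unlock=%u\n",
	       demo_ops.proto_name, jid,
	       demo_ops.lock(0, 3), demo_ops.unlock(3));
	return 0;
}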
@@ -87,6 +87,8 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
  */
 
 static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
+__releases(&sdp->sd_log_lock)
+__acquires(&sdp->sd_log_lock)
 {
 	struct gfs2_bufdata *bd, *s;
 	struct buffer_head *bh;
@@ -21,6 +21,7 @@
  */
 
 static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
+__acquires(&sdp->sd_log_lock)
 {
 	spin_lock(&sdp->sd_log_lock);
 }
 
@@ -32,6 +33,7 @@ static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
  */
 
 static inline void gfs2_log_unlock(struct gfs2_sbd *sdp)
+__releases(&sdp->sd_log_lock)
 {
 	spin_unlock(&sdp->sd_log_lock);
 }
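The __acquires()/__releases() markers added in the two hunks above are sparse annotations: they tell the static checker which functions change the lock context so that unbalanced paths can be flagged, and they expand to nothing in a normal build. Below is a minimal standalone sketch of roughly how the kernel's include/linux/compiler.h wires the macros up (the demo_log_lock/demo_log_unlock names are invented; compile with -lpthread):

/* Sparse lock annotations compile away outside a sparse run. */
#include <pthread.h>
#include <stdio.h>

#ifdef __CHECKER__			/* defined only when sparse runs */
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
#else
# define __acquires(x)			/* no-op for the real compiler */
# define __releases(x)
#endif

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_log_lock(void)
__acquires(&log_lock)
{
	pthread_mutex_lock(&log_lock);
}

static void demo_log_unlock(void)
__releases(&log_lock)
{
	pthread_mutex_unlock(&log_lock);
}

int main(void)
{
	demo_log_lock();
	puts("holding log_lock");
	demo_log_unlock();
	return 0;
}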
@@ -40,8 +40,6 @@ static void gfs2_init_glock_once(struct kmem_cache *cachep, void *foo)
 	INIT_HLIST_NODE(&gl->gl_list);
 	spin_lock_init(&gl->gl_spin);
 	INIT_LIST_HEAD(&gl->gl_holders);
-	INIT_LIST_HEAD(&gl->gl_waiters1);
-	INIT_LIST_HEAD(&gl->gl_waiters3);
 	gl->gl_lvb = NULL;
 	atomic_set(&gl->gl_lvb_count, 0);
 	INIT_LIST_HEAD(&gl->gl_reclaim);
@@ -129,7 +129,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
 }
 
 /**
- * getbuf - Get a buffer with a given address space
+ * gfs2_getbuf - Get a buffer with a given address space
  * @gl: the glock
  * @blkno: the block number (filesystem scope)
  * @create: 1 if the buffer should be created
@@ -137,7 +137,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
  * Returns: the buffer
  */
 
-static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
 {
 	struct address_space *mapping = gl->gl_aspace->i_mapping;
 	struct gfs2_sbd *sdp = gl->gl_sbd;
@@ -205,7 +205,7 @@ static void meta_prep_new(struct buffer_head *bh)
 struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
 {
 	struct buffer_head *bh;
-	bh = getbuf(gl, blkno, CREATE);
+	bh = gfs2_getbuf(gl, blkno, CREATE);
 	meta_prep_new(bh);
 	return bh;
 }
@@ -223,7 +223,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
 		   struct buffer_head **bhp)
 {
-	*bhp = getbuf(gl, blkno, CREATE);
+	*bhp = gfs2_getbuf(gl, blkno, CREATE);
 	if (!buffer_uptodate(*bhp)) {
 		ll_rw_block(READ_META, 1, bhp);
 		if (flags & DIO_WAIT) {
@@ -346,7 +346,7 @@ void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
 	struct buffer_head *bh;
 
 	while (blen) {
-		bh = getbuf(ip->i_gl, bstart, NO_CREATE);
+		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
 		if (bh) {
 			lock_buffer(bh);
 			gfs2_log_lock(sdp);
@@ -421,7 +421,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 	if (extlen > max_ra)
 		extlen = max_ra;
 
-	first_bh = getbuf(gl, dblock, CREATE);
+	first_bh = gfs2_getbuf(gl, dblock, CREATE);
 
 	if (buffer_uptodate(first_bh))
 		goto out;
@@ -432,7 +432,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 	extlen--;
 
 	while (extlen) {
-		bh = getbuf(gl, dblock, CREATE);
+		bh = gfs2_getbuf(gl, dblock, CREATE);
 
 		if (!buffer_uptodate(bh) && !buffer_locked(bh))
 			ll_rw_block(READA, 1, &bh);
@@ -47,6 +47,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno,
 		   int flags, struct buffer_head **bhp);
 int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create);
 
 void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
 			 int meta);
@@ -499,34 +499,34 @@ static int __gfs2_readpage(void *file, struct page *page)
  * @file: The file to read
  * @page: The page of the file
  *
- * This deals with the locking required. We use a trylock in order to
- * avoid the page lock / glock ordering problems returning AOP_TRUNCATED_PAGE
- * in the event that we are unable to get the lock.
+ * This deals with the locking required. We have to unlock and
+ * relock the page in order to get the locking in the right
+ * order.
  */
 
 static int gfs2_readpage(struct file *file, struct page *page)
 {
-	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
-	struct gfs2_holder *gh;
+	struct address_space *mapping = page->mapping;
+	struct gfs2_inode *ip = GFS2_I(mapping->host);
+	struct gfs2_holder gh;
 	int error;
 
-	gh = gfs2_glock_is_locked_by_me(ip->i_gl);
-	if (!gh) {
-		gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS);
-		if (!gh)
-			return -ENOBUFS;
-		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh);
-		unlock_page(page);
-		error = gfs2_glock_nq_atime(gh);
-		if (likely(error != 0))
-			goto out;
-		return AOP_TRUNCATED_PAGE;
-	}
-	error = __gfs2_readpage(file, page);
-	gfs2_glock_dq(gh);
+	unlock_page(page);
+	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
+	error = gfs2_glock_nq_atime(&gh);
+	if (unlikely(error))
+		goto out;
+	error = AOP_TRUNCATED_PAGE;
+	lock_page(page);
+	if (page->mapping == mapping && !PageUptodate(page))
+		error = __gfs2_readpage(file, page);
+	else
+		unlock_page(page);
+	gfs2_glock_dq(&gh);
 out:
-	gfs2_holder_uninit(gh);
-	kfree(gh);
+	gfs2_holder_uninit(&gh);
+	if (error && error != AOP_TRUNCATED_PAGE)
+		lock_page(page);
 	return error;
 }
 
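The rewritten gfs2_readpage() establishes a glock-then-page-lock ordering: it gives up the page lock it was entered with, takes the glock, retakes the page lock, and only then revalidates the page before reading. Here is a hedged pthread model of that drop/reacquire/revalidate pattern; it illustrates the ordering discipline only and is not GFS2 code (compile with -lpthread):

/* Userspace model of the readpage locking dance: the caller arrives
 * holding the inner lock, but lock order demands the outer lock first,
 * so we drop, reacquire in order, and revalidate. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t glock = PTHREAD_MUTEX_INITIALIZER;	/* outer */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;	/* inner */
static bool page_valid = true;	/* stands in for the page->mapping check */

/* Called with page_lock held; returns 0 on success, 1 for "retry". */
static int demo_readpage(void)
{
	int ret = 1;			/* analogue of AOP_TRUNCATED_PAGE */

	pthread_mutex_unlock(&page_lock);	/* give up the inner lock */
	pthread_mutex_lock(&glock);		/* take the outer lock first */
	pthread_mutex_lock(&page_lock);		/* retake inner, now in order */
	if (page_valid) {			/* revalidate after the gap */
		puts("page still valid: do the read");
		ret = 0;
	}
	pthread_mutex_unlock(&page_lock);
	pthread_mutex_unlock(&glock);
	return ret;
}

int main(void)
{
	pthread_mutex_lock(&page_lock);		/* caller's starting state */
	printf("demo_readpage -> %d\n", demo_readpage());
	return 0;
}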
@@ -15,6 +15,7 @@
 #include <linux/uio.h>
 #include <linux/blkdev.h>
 #include <linux/mm.h>
+#include <linux/mount.h>
 #include <linux/fs.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/ext2_fs.h>
@@ -133,7 +134,6 @@ static const u32 fsflags_to_gfs2[32] = {
 	[7] = GFS2_DIF_NOATIME,
 	[12] = GFS2_DIF_EXHASH,
 	[14] = GFS2_DIF_INHERIT_JDATA,
-	[20] = GFS2_DIF_INHERIT_DIRECTIO,
 };
 
 static const u32 gfs2_to_fsflags[32] = {
@@ -142,7 +142,6 @@ static const u32 gfs2_to_fsflags[32] = {
 	[gfs2fl_AppendOnly] = FS_APPEND_FL,
 	[gfs2fl_NoAtime] = FS_NOATIME_FL,
 	[gfs2fl_ExHash] = FS_INDEX_FL,
-	[gfs2fl_InheritDirectio] = FS_DIRECTIO_FL,
 	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
 };
 
@@ -160,12 +159,8 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
 		return error;
 
 	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_di.di_flags);
-	if (!S_ISDIR(inode->i_mode)) {
-		if (ip->i_di.di_flags & GFS2_DIF_JDATA)
-			fsflags |= FS_JOURNAL_DATA_FL;
-		if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
-			fsflags |= FS_DIRECTIO_FL;
-	}
+	if (!S_ISDIR(inode->i_mode) && ip->i_di.di_flags & GFS2_DIF_JDATA)
+		fsflags |= FS_JOURNAL_DATA_FL;
 	if (put_user(fsflags, ptr))
 		error = -EFAULT;
 
@@ -194,13 +189,11 @@ void gfs2_set_inode_flags(struct inode *inode)
 
 /* Flags that can be set by user space */
 #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|		\
-			     GFS2_DIF_DIRECTIO|		\
			     GFS2_DIF_IMMUTABLE|	\
			     GFS2_DIF_APPENDONLY|	\
			     GFS2_DIF_NOATIME|		\
			     GFS2_DIF_SYNC|		\
			     GFS2_DIF_SYSTEM|		\
-			     GFS2_DIF_INHERIT_DIRECTIO|	\
			     GFS2_DIF_INHERIT_JDATA)
 
 /**
@@ -220,10 +213,14 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
 	int error;
 	u32 new_flags, flags;
 
-	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+	error = mnt_want_write(filp->f_path.mnt);
 	if (error)
 		return error;
 
+	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+	if (error)
+		goto out_drop_write;
+
 	flags = ip->i_di.di_flags;
 	new_flags = (flags & ~mask) | (reqflags & mask);
 	if ((new_flags ^ flags) == 0)
@@ -242,7 +239,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
 	    !capable(CAP_LINUX_IMMUTABLE))
 		goto out;
 	if (!IS_IMMUTABLE(inode)) {
-		error = permission(inode, MAY_WRITE, NULL);
+		error = gfs2_permission(inode, MAY_WRITE);
 		if (error)
 			goto out;
 	}
@@ -272,6 +269,8 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
 	gfs2_trans_end(sdp);
 out:
 	gfs2_glock_dq_uninit(&gh);
+out_drop_write:
+	mnt_drop_write(filp->f_path.mnt);
 	return error;
 }
 
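The reordering in do_gfs2_set_flags() pairs mnt_want_write() with mnt_drop_write() around the whole operation and unwinds through labels, so each resource is released exactly once and in reverse order of acquisition. A standalone sketch of that goto-ladder idiom follows; all function names in it are invented stand-ins for the real calls:

/* Acquire in order, release in reverse via a goto ladder. */
#include <stdio.h>

static int want_write(void)  { puts("get write ref");  return 0; }
static void drop_write(void) { puts("put write ref"); }
static int take_glock(void)  { puts("take glock");     return 0; }
static void drop_glock(void) { puts("drop glock"); }
static int do_change(void)   { puts("apply flags");    return 0; }

static int set_flags(void)
{
	int error;

	error = want_write();		/* first resource */
	if (error)
		return error;

	error = take_glock();		/* second resource */
	if (error)
		goto out_drop_write;

	error = do_change();

	drop_glock();			/* release in reverse order */
out_drop_write:
	drop_write();
	return error;
}

int main(void)
{
	return set_flags();
}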
@@ -285,8 +284,6 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
 	if (!S_ISDIR(inode->i_mode)) {
 		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
 			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
-		if (gfsflags & GFS2_DIF_INHERIT_DIRECTIO)
-			gfsflags ^= (GFS2_DIF_DIRECTIO | GFS2_DIF_INHERIT_DIRECTIO);
 		return do_gfs2_set_flags(filp, gfsflags, ~0);
 	}
 	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
@@ -487,11 +484,6 @@ static int gfs2_open(struct inode *inode, struct file *file)
 			goto fail_gunlock;
 		}
 
-		/* Listen to the Direct I/O flag */
-
-		if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
-			file->f_flags |= O_DIRECT;
-
 		gfs2_glock_dq_uninit(&i_gh);
 	}
 
@@ -669,8 +661,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 	int error = 0;
 
 	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE
-		| GL_FLOCK;
+	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
 
 	mutex_lock(&fp->f_fl_mutex);
 
@@ -683,9 +674,8 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 		gfs2_glock_dq_wait(fl_gh);
 		gfs2_holder_reinit(state, flags, fl_gh);
 	} else {
-		error = gfs2_glock_get(GFS2_SB(&ip->i_inode),
-				       ip->i_no_addr, &gfs2_flock_glops,
-				       CREATE, &gl);
+		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
+				       &gfs2_flock_glops, CREATE, &gl);
 		if (error)
 			goto out;
 		gfs2_holder_init(gl, state, flags, fl_gh);
@@ -64,7 +64,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	mutex_init(&sdp->sd_rindex_mutex);
 	INIT_LIST_HEAD(&sdp->sd_rindex_list);
+	INIT_LIST_HEAD(&sdp->sd_rindex_mru_list);
-	INIT_LIST_HEAD(&sdp->sd_rindex_recent_list);
 
 	INIT_LIST_HEAD(&sdp->sd_jindex_list);
 	spin_lock_init(&sdp->sd_jindex_spin);
@@ -364,6 +363,8 @@ static int map_journal_extents(struct gfs2_sbd *sdp)
 
 static void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp)
 {
+	if (!sdp->sd_lockstruct.ls_ops->lm_others_may_mount)
+		return;
 	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 		sdp->sd_lockstruct.ls_ops->lm_others_may_mount(
 			sdp->sd_lockstruct.ls_lockspace);
@@ -741,8 +742,7 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
 		goto out;
 	}
 
-	if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lockspace) ||
-	    gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
+	if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
 	    gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >=
 				  GFS2_MIN_LVB_SIZE)) {
 		gfs2_unmount_lockproto(&sdp->sd_lockstruct);
@@ -873,7 +873,7 @@ static int fill_super(struct super_block *sb, void *data, int silent)
 fail_locking:
 	init_locking(sdp, &mount_gh, UNDO);
 fail_lm:
-	gfs2_gl_hash_clear(sdp, WAIT);
+	gfs2_gl_hash_clear(sdp);
 	gfs2_lm_unmount(sdp);
 	while (invalidate_inodes(sb))
 		yield();
@@ -163,7 +163,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
 	if (error)
 		goto out;
 
-	error = permission(dir, MAY_WRITE | MAY_EXEC, NULL);
+	error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC);
 	if (error)
 		goto out_gunlock;
 
@@ -669,7 +669,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
 			}
 		}
 	} else {
-		error = permission(ndir, MAY_WRITE | MAY_EXEC, NULL);
+		error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC);
 		if (error)
 			goto out_gunlock;
 
@@ -704,7 +704,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
 	/* Check out the dir to be renamed */
 
 	if (dir_rename) {
-		error = permission(odentry->d_inode, MAY_WRITE, NULL);
+		error = gfs2_permission(odentry->d_inode, MAY_WRITE);
 		if (error)
 			goto out_gunlock;
 	}
@@ -891,7 +891,7 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
 * Returns: errno
 */

-static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
+int gfs2_permission(struct inode *inode, int mask)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder i_gh;
@@ -905,13 +905,22 @@ static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
 		unlock = 1;
 	}
 
-	error = generic_permission(inode, mask, gfs2_check_acl);
+	if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
+		error = -EACCES;
+	else
+		error = generic_permission(inode, mask, gfs2_check_acl);
 	if (unlock)
 		gfs2_glock_dq_uninit(&i_gh);
 
 	return error;
 }
 
+static int gfs2_iop_permission(struct inode *inode, int mask,
+			       struct nameidata *nd)
+{
+	return gfs2_permission(inode, mask);
+}
+
 static int setattr_size(struct inode *inode, struct iattr *attr)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
|
|||
}
|
||||
|
||||
const struct inode_operations gfs2_file_iops = {
|
||||
.permission = gfs2_permission,
|
||||
.permission = gfs2_iop_permission,
|
||||
.setattr = gfs2_setattr,
|
||||
.getattr = gfs2_getattr,
|
||||
.setxattr = gfs2_setxattr,
|
||||
|
@ -1160,7 +1169,7 @@ const struct inode_operations gfs2_dir_iops = {
|
|||
.rmdir = gfs2_rmdir,
|
||||
.mknod = gfs2_mknod,
|
||||
.rename = gfs2_rename,
|
||||
.permission = gfs2_permission,
|
||||
.permission = gfs2_iop_permission,
|
||||
.setattr = gfs2_setattr,
|
||||
.getattr = gfs2_getattr,
|
||||
.setxattr = gfs2_setxattr,
|
||||
|
@ -1172,7 +1181,7 @@ const struct inode_operations gfs2_dir_iops = {
|
|||
const struct inode_operations gfs2_symlink_iops = {
|
||||
.readlink = gfs2_readlink,
|
||||
.follow_link = gfs2_follow_link,
|
||||
.permission = gfs2_permission,
|
||||
.permission = gfs2_iop_permission,
|
||||
.setattr = gfs2_setattr,
|
||||
.getattr = gfs2_getattr,
|
||||
.setxattr = gfs2_setxattr,
|
||||
|
|
|
@@ -126,7 +126,7 @@ static void gfs2_put_super(struct super_block *sb)
 	gfs2_clear_rgrpd(sdp);
 	gfs2_jindex_free(sdp);
 	/* Take apart glock structures and buffer lists */
-	gfs2_gl_hash_clear(sdp, WAIT);
+	gfs2_gl_hash_clear(sdp);
 	/* Unmount the locking protocol */
 	gfs2_lm_unmount(sdp);
 
@@ -155,7 +155,7 @@ static void gfs2_write_super(struct super_block *sb)
 static int gfs2_sync_fs(struct super_block *sb, int wait)
 {
 	sb->s_dirt = 0;
-	if (wait)
+	if (wait && sb->s_fs_info)
 		gfs2_log_flush(sb->s_fs_info, NULL);
 	return 0;
 }
@@ -904,7 +904,7 @@ static int need_sync(struct gfs2_quota_data *qd)
 		do_sync = 0;
 	else {
 		value *= gfs2_jindex_size(sdp) * num;
-		do_div(value, den);
+		value = div_s64(value, den);
 		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
 		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
 			do_sync = 0;
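The switch from do_div() to div_s64() matters because do_div() is defined for unsigned 64-bit dividends only, while the quota value here is a signed s64 that can legitimately go negative. A userspace demonstration of the difference, using ordinary C division to stand in for the kernel helpers:

/* Why signed division is needed: a negative s64 pushed through
 * unsigned division comes back as a huge bogus positive value. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	int64_t value = -1000;
	int64_t den = 3;

	/* signed division, what div_s64(value, den) computes */
	printf("signed:   %" PRId64 "\n", value / den);		/* -333 */

	/* unsigned division, roughly what do_div() would do */
	uint64_t uvalue = (uint64_t)value;
	uvalue /= (uint64_t)den;
	printf("unsigned: %" PRId64 "\n", (int64_t)uvalue);	/* garbage */
	return 0;
}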
@@ -428,6 +428,9 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head
 static void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
 				  unsigned int message)
 {
+	if (!sdp->sd_lockstruct.ls_ops->lm_recovery_done)
+		return;
+
 	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 		sdp->sd_lockstruct.ls_ops->lm_recovery_done(
 			sdp->sd_lockstruct.ls_lockspace, jid, message);
@@ -505,7 +508,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
 
 	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
 				   LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
-				   GL_NOCANCEL | GL_NOCACHE, &t_gh);
+				   GL_NOCACHE, &t_gh);
 	if (error)
 		goto fail_gunlock_ji;
 
fs/gfs2/rgrp.c
@@ -371,11 +371,6 @@ static void clear_rgrpdi(struct gfs2_sbd *sdp)
 
 	spin_lock(&sdp->sd_rindex_spin);
 	sdp->sd_rindex_forward = NULL;
-	head = &sdp->sd_rindex_recent_list;
-	while (!list_empty(head)) {
-		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
-		list_del(&rgd->rd_recent);
-	}
 	spin_unlock(&sdp->sd_rindex_spin);
 
 	head = &sdp->sd_rindex_list;
@@ -944,107 +939,30 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
 	return NULL;
 }
 
-/**
- * recent_rgrp_first - get first RG from "recent" list
- * @sdp: The GFS2 superblock
- * @rglast: address of the rgrp used last
- *
- * Returns: The first rgrp in the recent list
- */
-
-static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
-					    u64 rglast)
-{
-	struct gfs2_rgrpd *rgd;
-
-	spin_lock(&sdp->sd_rindex_spin);
-
-	if (rglast) {
-		list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
-			if (rgrp_contains_block(rgd, rglast))
-				goto out;
-		}
-	}
-	rgd = NULL;
-	if (!list_empty(&sdp->sd_rindex_recent_list))
-		rgd = list_entry(sdp->sd_rindex_recent_list.next,
-				 struct gfs2_rgrpd, rd_recent);
-out:
-	spin_unlock(&sdp->sd_rindex_spin);
-	return rgd;
-}
-
 /**
  * recent_rgrp_next - get next RG from "recent" list
  * @cur_rgd: current rgrp
- * @remove:
  *
  * Returns: The next rgrp in the recent list
  */
 
-static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd,
-					   int remove)
+static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd)
 {
 	struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
 	struct list_head *head;
 	struct gfs2_rgrpd *rgd;
 
 	spin_lock(&sdp->sd_rindex_spin);
-
-	head = &sdp->sd_rindex_recent_list;
-
-	list_for_each_entry(rgd, head, rd_recent) {
-		if (rgd == cur_rgd) {
-			if (cur_rgd->rd_recent.next != head)
-				rgd = list_entry(cur_rgd->rd_recent.next,
-						 struct gfs2_rgrpd, rd_recent);
-			else
-				rgd = NULL;
-
-			if (remove)
-				list_del(&cur_rgd->rd_recent);
-
-			goto out;
-		}
+	head = &sdp->sd_rindex_mru_list;
+	if (unlikely(cur_rgd->rd_list_mru.next == head)) {
+		spin_unlock(&sdp->sd_rindex_spin);
+		return NULL;
 	}
-
-	rgd = NULL;
-	if (!list_empty(head))
-		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
-
-out:
+	rgd = list_entry(cur_rgd->rd_list_mru.next, struct gfs2_rgrpd, rd_list_mru);
 	spin_unlock(&sdp->sd_rindex_spin);
 	return rgd;
 }
 
-/**
- * recent_rgrp_add - add an RG to tail of "recent" list
- * @new_rgd: The rgrp to add
- *
- */
-
-static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
-{
-	struct gfs2_sbd *sdp = new_rgd->rd_sbd;
-	struct gfs2_rgrpd *rgd;
-	unsigned int count = 0;
-	unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);
-
-	spin_lock(&sdp->sd_rindex_spin);
-
-	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
-		if (rgd == new_rgd)
-			goto out;
-
-		if (++count >= max)
-			goto out;
-	}
-	list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);
-
-out:
-	spin_unlock(&sdp->sd_rindex_spin);
-}
-
 /**
  * forward_rgrp_get - get an rgrp to try next from full list
  * @sdp: The GFS2 superblock
@@ -1112,9 +1030,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
 	int loops = 0;
 	int error, rg_locked;
 
-	/* Try recently successful rgrps */
-
-	rgd = recent_rgrp_first(sdp, ip->i_goal);
+	rgd = gfs2_blk2rgrpd(sdp, ip->i_goal);
 
 	while (rgd) {
 		rg_locked = 0;
@@ -1136,11 +1052,9 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
 			gfs2_glock_dq_uninit(&al->al_rgd_gh);
 			if (inode)
 				return inode;
-			rgd = recent_rgrp_next(rgd, 1);
-			break;
-
+			/* fall through */
 		case GLR_TRYFAILED:
-			rgd = recent_rgrp_next(rgd, 0);
+			rgd = recent_rgrp_next(rgd);
 			break;
 
 		default:
@@ -1199,7 +1113,9 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
 
 out:
 	if (begin) {
-		recent_rgrp_add(rgd);
+		spin_lock(&sdp->sd_rindex_spin);
+		list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
+		spin_unlock(&sdp->sd_rindex_spin);
 		rgd = gfs2_rgrpd_get_next(rgd);
 		if (!rgd)
 			rgd = gfs2_rgrpd_get_first(sdp);
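With the "recent" list gone, MRU maintenance reduces to a single list_move(): a successful allocation splices the rgrp to the head of sd_rindex_mru_list, and recent_rgrp_next() simply follows the next pointer. The toy list below shows the same move-to-front mechanics; it is a from-scratch illustration, not <linux/list.h>:

/* Minimal userspace sketch of the MRU pattern behind list_move(). */
#include <stdio.h>

struct node {
	struct node *prev, *next;
	const char *name;
};

static void list_del_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_head(struct node *n, struct node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

/* the whole of list_move(): unlink, then splice at the MRU end */
static void list_move_head(struct node *n, struct node *head)
{
	list_del_node(n);
	list_add_head(n, head);
}

int main(void)
{
	struct node head = { &head, &head, "head" };
	struct node a = { 0 }, b = { 0 }, c = { 0 };
	a.name = "rg0"; b.name = "rg1"; c.name = "rg2";
	list_add_head(&c, &head);	/* list: rg2 */
	list_add_head(&b, &head);	/* list: rg1 rg2 */
	list_add_head(&a, &head);	/* list: rg0 rg1 rg2 */

	list_move_head(&c, &head);	/* "rg2" just allocated */
	for (struct node *n = head.next; n != &head; n = n->next)
		printf("%s ", n->name);
	putchar('\n');			/* prints: rg2 rg0 rg1 */
	return 0;
}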
@@ -65,7 +65,6 @@ void gfs2_tune_init(struct gfs2_tune *gt)
 	gt->gt_quota_quantum = 60;
 	gt->gt_atime_quantum = 3600;
 	gt->gt_new_files_jdata = 0;
-	gt->gt_new_files_directio = 0;
 	gt->gt_max_readahead = 1 << 18;
 	gt->gt_stall_secs = 600;
 	gt->gt_complain_secs = 10;
@@ -941,8 +940,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
 	}
 
 	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
-				   LM_FLAG_PRIORITY | GL_NOCACHE,
-				   t_gh);
+				   GL_NOCACHE, t_gh);
 
 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
 		error = gfs2_jdesc_check(jd);
@@ -110,18 +110,6 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
 	return len;
 }
 
-static ssize_t shrink_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
-{
-	if (!capable(CAP_SYS_ADMIN))
-		return -EACCES;
-
-	if (simple_strtol(buf, NULL, 0) != 1)
-		return -EINVAL;
-
-	gfs2_gl_hash_clear(sdp, NO_WAIT);
-	return len;
-}
-
 static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
 				size_t len)
 {
@@ -175,7 +163,6 @@ static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store)
 GFS2_ATTR(id, 0444, id_show, NULL);
 GFS2_ATTR(fsname, 0444, fsname_show, NULL);
 GFS2_ATTR(freeze, 0644, freeze_show, freeze_store);
-GFS2_ATTR(shrink, 0200, NULL, shrink_store);
 GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
 GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store);
 GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store);
@@ -186,7 +173,6 @@ static struct attribute *gfs2_attrs[] = {
 	&gfs2_attr_id.attr,
 	&gfs2_attr_fsname.attr,
 	&gfs2_attr_freeze.attr,
-	&gfs2_attr_shrink.attr,
 	&gfs2_attr_withdraw.attr,
 	&gfs2_attr_statfs_sync.attr,
 	&gfs2_attr_quota_sync.attr,
@@ -426,7 +412,6 @@ TUNE_ATTR(max_readahead, 0);
 TUNE_ATTR(complain_secs, 0);
 TUNE_ATTR(statfs_slow, 0);
 TUNE_ATTR(new_files_jdata, 0);
-TUNE_ATTR(new_files_directio, 0);
 TUNE_ATTR(quota_simul_sync, 1);
 TUNE_ATTR(quota_cache_secs, 1);
 TUNE_ATTR(stall_secs, 1);
@@ -455,7 +440,6 @@ static struct attribute *tune_attrs[] = {
 	&tune_attr_quotad_secs.attr,
 	&tune_attr_quota_scale.attr,
 	&tune_attr_new_files_jdata.attr,
-	&tune_attr_new_files_directio.attr,
 	NULL,
 };
 
@@ -122,11 +122,9 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
 */

 #define LM_OUT_ST_MASK		0x00000003
-#define LM_OUT_CACHEABLE	0x00000004
 #define LM_OUT_CANCELED		0x00000008
 #define LM_OUT_ASYNC		0x00000080
 #define LM_OUT_ERROR		0x00000100
-#define LM_OUT_CONV_DEADLK	0x00000200
 
 /*
  * lm_callback_t types
@@ -138,9 +136,6 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
  * LM_CB_NEED_RECOVERY
  * The given journal needs to be recovered.
  *
- * LM_CB_DROPLOCKS
- * Reduce the number of cached locks.
- *
  * LM_CB_ASYNC
  * The given lock has been granted.
  */
@@ -149,7 +144,6 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
 #define LM_CB_NEED_D		258
 #define LM_CB_NEED_S		259
 #define LM_CB_NEED_RECOVERY	260
-#define LM_CB_DROPLOCKS		261
 #define LM_CB_ASYNC		262
 
 /*