NTFS: Fix cluster (de)allocators to work when the runlist is NULL and more

      importantly to take a locked runlist rather than them locking it
      which leads to lock reversal.

Signed-off-by: Anton Altaparmakov <aia21@cantab.net>
Author: Anton Altaparmakov <aia21@cantab.net>
Date:   2005-09-08 21:09:06 +01:00
commit bbf1813fb8
parent 807c453de7
4 changed files with 32 additions and 33 deletions

fs/ntfs/ChangeLog

@@ -70,6 +70,9 @@ ToDo/Notes:
 	- Add BUG() checks to ntfs_attr_make_non_resident() and ntfs_attr_set()
 	  to ensure that these functions are never called for compressed or
 	  encrypted attributes.
+	- Fix cluster (de)allocators to work when the runlist is NULL and more
+	  importantly to take a locked runlist rather than them locking it
+	  which leads to lock reversal.
 2.1.23 - Implement extension of resident files and make writing safe as well as
 	 many bug fixes, cleanups, and enhancements...

fs/ntfs/lcnalloc.c

@@ -54,6 +54,8 @@ int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
 	int ret = 0;
 	ntfs_debug("Entering.");
+	if (!rl)
+		return 0;
 	for (; rl->length; rl++) {
 		int err;
@@ -163,17 +165,9 @@ runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
 	BUG_ON(zone < FIRST_ZONE);
 	BUG_ON(zone > LAST_ZONE);
-	/* Return empty runlist if @count == 0 */
-	// FIXME: Do we want to just return NULL instead? (AIA)
-	if (!count) {
-		rl = ntfs_malloc_nofs(PAGE_SIZE);
-		if (!rl)
-			return ERR_PTR(-ENOMEM);
-		rl[0].vcn = start_vcn;
-		rl[0].lcn = LCN_RL_NOT_MAPPED;
-		rl[0].length = 0;
-		return rl;
-	}
+	/* Return NULL if @count is zero. */
+	if (!count)
+		return NULL;
 	/* Take the lcnbmp lock for writing. */
 	down_write(&vol->lcnbmp_lock);
 	/*
@@ -788,7 +782,8 @@ switch_to_data1_zone: search_zone = 2;
  * @vi: vfs inode whose runlist describes the clusters to free
  * @start_vcn: vcn in the runlist of @vi at which to start freeing clusters
  * @count: number of clusters to free or -1 for all clusters
- * @is_rollback: if TRUE this is a rollback operation
+ * @write_locked: true if the runlist is locked for writing
+ * @is_rollback: true if this is a rollback operation
  *
  * Free @count clusters starting at the cluster @start_vcn in the runlist
  * described by the vfs inode @vi.
@@ -806,17 +801,17 @@ switch_to_data1_zone: search_zone = 2;
  * Return the number of deallocated clusters (not counting sparse ones) on
  * success and -errno on error.
  *
- * Locking: - The runlist described by @vi must be unlocked on entry and is
- *            unlocked on return.
- *          - This function takes the runlist lock of @vi for reading and
- *            sometimes for writing and sometimes modifies the runlist.
+ * Locking: - The runlist described by @vi must be locked on entry and is
+ *            locked on return. Note if the runlist is locked for reading the
+ *            lock may be dropped and reacquired. Note the runlist may be
+ *            modified when needed runlist fragments need to be mapped.
  *          - The volume lcn bitmap must be unlocked on entry and is unlocked
  *            on return.
  *          - This function takes the volume lcn bitmap lock for writing and
  *            modifies the bitmap contents.
  */
 s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
-		const BOOL is_rollback)
+		const BOOL write_locked, const BOOL is_rollback)
 {
 	s64 delta, to_free, total_freed, real_freed;
 	ntfs_inode *ni;
@@ -848,8 +843,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
 	total_freed = real_freed = 0;
-	down_read(&ni->runlist.lock);
-	rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, FALSE);
+	rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, write_locked);
 	if (IS_ERR(rl)) {
 		if (!is_rollback)
 			ntfs_error(vol->sb, "Failed to find first runlist "
@@ -903,7 +897,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
 			/* Attempt to map runlist. */
 			vcn = rl->vcn;
-			rl = ntfs_attr_find_vcn_nolock(ni, vcn, FALSE);
+			rl = ntfs_attr_find_vcn_nolock(ni, vcn, write_locked);
 			if (IS_ERR(rl)) {
 				err = PTR_ERR(rl);
 				if (!is_rollback)
@@ -950,7 +944,6 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
 		/* Update the total done clusters. */
 		total_freed += to_free;
 	}
-	up_read(&ni->runlist.lock);
 	if (likely(!is_rollback))
 		up_write(&vol->lcnbmp_lock);
@@ -960,7 +953,6 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
 	ntfs_debug("Done.");
 	return real_freed;
 err_out:
-	up_read(&ni->runlist.lock);
 	if (is_rollback)
 		return err;
 	/* If no real clusters were freed, no need to rollback. */
@@ -973,7 +965,8 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count,
 	 * If rollback fails, set the volume errors flag, emit an error
 	 * message, and return the error code.
 	 */
-	delta = __ntfs_cluster_free(vi, start_vcn, total_freed, TRUE);
+	delta = __ntfs_cluster_free(vi, start_vcn, total_freed, write_locked,
+			TRUE);
 	if (delta < 0) {
 		ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving "
 				"inconsistent metadata! Unmount and run "

fs/ntfs/lcnalloc.h

@@ -43,13 +43,14 @@ extern runlist_element *ntfs_cluster_alloc(ntfs_volume *vol,
 		const NTFS_CLUSTER_ALLOCATION_ZONES zone);
 extern s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn,
-		s64 count, const BOOL is_rollback);
+		s64 count, const BOOL write_locked, const BOOL is_rollback);
 /**
  * ntfs_cluster_free - free clusters on an ntfs volume
  * @vi: vfs inode whose runlist describes the clusters to free
  * @start_vcn: vcn in the runlist of @vi at which to start freeing clusters
  * @count: number of clusters to free or -1 for all clusters
+ * @write_locked: true if the runlist is locked for writing
  *
  * Free @count clusters starting at the cluster @start_vcn in the runlist
  * described by the vfs inode @vi.
@@ -64,19 +65,19 @@ extern s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn,
  * Return the number of deallocated clusters (not counting sparse ones) on
  * success and -errno on error.
  *
- * Locking: - The runlist described by @vi must be unlocked on entry and is
- *            unlocked on return.
- *          - This function takes the runlist lock of @vi for reading and
- *            sometimes for writing and sometimes modifies the runlist.
+ * Locking: - The runlist described by @vi must be locked on entry and is
+ *            locked on return. Note if the runlist is locked for reading the
+ *            lock may be dropped and reacquired. Note the runlist may be
+ *            modified when needed runlist fragments need to be mapped.
  *          - The volume lcn bitmap must be unlocked on entry and is unlocked
  *            on return.
  *          - This function takes the volume lcn bitmap lock for writing and
  *            modifies the bitmap contents.
  */
 static inline s64 ntfs_cluster_free(struct inode *vi, const VCN start_vcn,
-		s64 count)
+		s64 count, const BOOL write_locked)
 {
-	return __ntfs_cluster_free(vi, start_vcn, count, FALSE);
+	return __ntfs_cluster_free(vi, start_vcn, count, write_locked, FALSE);
 }
 extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
@@ -93,8 +94,10 @@ extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
  *
  * Return 0 on success and -errno on error.
  *
- * Locking: This function takes the volume lcn bitmap lock for writing and
- *          modifies the bitmap contents.
+ * Locking: - This function takes the volume lcn bitmap lock for writing and
+ *            modifies the bitmap contents.
+ *          - The caller must have locked the runlist @rl for reading or
+ *            writing.
  */
 static inline int ntfs_cluster_free_from_rl(ntfs_volume *vol,
 		const runlist_element *rl)
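As a hedged usage sketch of the updated ntfs_cluster_free_from_rl() contract (illustrative only; vol, ni and err are assumed caller-local names, and ni->runlist.rl is assumed to be the inode's cached runlist): the caller must already hold the runlist lock covering @rl, and with the new NULL check a not-yet-mapped (NULL) runlist simply frees nothing and returns 0.

	int err;

	/* Caller holds the runlist lock; the helper only takes the lcnbmp lock. */
	down_read(&ni->runlist.lock);
	err = ntfs_cluster_free_from_rl(vol, ni->runlist.rl);	/* NULL rl returns 0 */
	up_read(&ni->runlist.lock);
	if (err)
		ntfs_error(vol->sb, "Failed to free clusters (error %i).", err);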

fs/ntfs/mft.c

@@ -1953,7 +1953,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
 	a = ctx->attr;
 	a->data.non_resident.highest_vcn = cpu_to_sle64(old_last_vcn - 1);
 undo_alloc:
-	if (ntfs_cluster_free(vol->mft_ino, old_last_vcn, -1) < 0) {
+	if (ntfs_cluster_free(vol->mft_ino, old_last_vcn, -1, TRUE) < 0) {
 		ntfs_error(vol->sb, "Failed to free clusters from mft data "
 				"attribute.%s", es);
 		NVolSetErrors(vol);