Merge git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6
commit dd05e42fa8
14 changed files with 3843 additions and 1045 deletions
@@ -50,9 +50,14 @@ userspace utilities, etc.
Features
========

- This is a complete rewrite of the NTFS driver that used to be in the kernel.
  This new driver implements NTFS read support and is functionally equivalent
  to the old ntfs driver.
- This is a complete rewrite of the NTFS driver that used to be in the 2.4 and
  earlier kernels. This new driver implements NTFS read support and is
  functionally equivalent to the old ntfs driver and it also implements limited
  write support. The biggest limitation at present is that files/directories
  cannot be created or deleted. See below for the list of write features that
  are so far supported. Another limitation is that writing to compressed files
  is not implemented at all. Also, neither read nor write access to encrypted
  files is so far implemented.
- The new driver has full support for sparse files on NTFS 3.x volumes which
  the old driver isn't happy with.
- The new driver supports execution of binaries due to mmap() now being
@@ -78,7 +83,20 @@ Features
- The new driver supports fsync(2), fdatasync(2), and msync(2).
- The new driver supports readv(2) and writev(2).
- The new driver supports access time updates (including mtime and ctime).
- The new driver supports truncate(2) and open(2) with O_TRUNC. But at present
  only very limited support for highly fragmented files, i.e. ones which have
  their data attribute split across multiple extents, is included. Another
  limitation is that at present truncate(2) will never create sparse files,
  since to mark a file sparse we need to modify the directory entry for the
  file and we do not implement directory modifications yet.
- The new driver supports write(2) which can both overwrite existing data and
  extend the file size so that you can write beyond the existing data. Also,
  writing into sparse regions is supported and the holes are filled in with
  clusters. But at present only limited support for highly fragmented files,
  i.e. ones which have their data attribute split across multiple extents, is
  included. Another limitation is that write(2) will never create sparse
  files, since to mark a file sparse we need to modify the directory entry for
  the file and we do not implement directory modifications yet.

Supported mount options
=======================
@@ -439,6 +457,22 @@ ChangeLog

Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.

2.1.25:
	- Write support is now extended with write(2) being able to both
	  overwrite existing file data and to extend files. Also, if a write
	  to a sparse region occurs, write(2) will fill in the hole. Note,
	  mmap(2) based writes still do not support writing into holes or
	  writing beyond the initialized size.
	- Write support has a new feature and that is that truncate(2) and
	  open(2) with O_TRUNC are now implemented thus files can be both made
	  smaller and larger.
	- Note: Both write(2) and truncate(2)/open(2) with O_TRUNC still have
	  limitations in that they
	  - only provide limited support for highly fragmented files.
	  - only work on regular, i.e. uncompressed and unencrypted files.
	  - never create sparse files although this will change once directory
	    operations are implemented.
	- Lots of bug fixes and enhancements across the board.
2.1.24:
	- Support journals ($LogFile) which have been modified by chkdsk. This
	  means users can boot into Windows after we marked the volume dirty.
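As a concrete illustration of the new 2.1.25 user-visible behaviour listed above, here is a minimal userspace sketch that overwrites, extends and then truncates a file assumed to live on an NTFS volume mounted read-write. The path /mnt/ntfs/test.dat is hypothetical, and the file must already exist because the driver still cannot create files.

#define _XOPEN_SOURCE 500
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char buf[] = "hello ntfs";
	int fd = open("/mnt/ntfs/test.dat", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Overwrite existing data at the start of the file. */
	if (pwrite(fd, buf, sizeof(buf) - 1, 0) < 0)
		perror("pwrite (overwrite)");
	/* Extend the file by writing beyond the current end of data. */
	if (pwrite(fd, buf, sizeof(buf) - 1, 1024 * 1024) < 0)
		perror("pwrite (extend)");
	/* truncate(2)/ftruncate(2) can now shrink (or grow) the file. */
	if (ftruncate(fd, 4096) < 0)
		perror("ftruncate");
	return close(fd);
}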
@@ -1,18 +1,15 @@
ToDo/Notes:
	- Find and fix bugs.
	- In between ntfs_prepare/commit_write, need exclusion between
	  simultaneous file extensions. This is given to us by holding i_sem
	  on the inode. The only places in the kernel where a file is resized
	  are prepare/commit write and truncate for both of which i_sem is
	  held. Just have to be careful in readpage/writepage and all other
	  helpers not running under i_sem that we play nice...
	  Also need to be careful with initialized_size extension in
	  ntfs_prepare_write. Basically, just be _very_ careful in this code...
	  UPDATE: The only things that need to be checked are read/writepage
	  which do not hold i_sem. Note writepage cannot change i_size but it
	  needs to cope with a concurrent i_size change, just like readpage.
	  Also both need to cope with concurrent changes to the other sizes,
	  i.e. initialized/allocated/compressed size, as well.
	- The only places in the kernel where a file is resized are
	  ntfs_file_write*() and ntfs_truncate() for both of which i_sem is
	  held. Just have to be careful in read-/writepage and other helpers
	  not running under i_sem that we play nice... Also need to be careful
	  with initialized_size extension in ntfs_file_write*() and writepage.
	  UPDATE: The only things that need to be checked are the compressed
	  write and the other attribute resize/write cases like index
	  attributes, etc. For now none of these are implemented so are safe.
	- Implement filling in of holes in aops.c::ntfs_writepage() and its
	  helpers.
	- Implement mft.c::sync_mft_mirror_umount(). We currently will just
	  leave the volume dirty on umount if the final iput(vol->mft_ino)
	  causes a write of any mirrored mft records due to the mft mirror
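The i_sem exclusion notes above rely on helpers that do not hold i_sem taking a consistent snapshot of the sizes under the per-inode size_lock, which is the same pattern the aops.c code elsewhere in this commit uses. A condensed, illustrative helper follows; it is not driver code and assumes the driver's internal inode.h definitions (ntfs_inode, NTFS_I(), size_lock) are in scope.

static s64 example_sample_initialized_size(struct inode *vi)
{
	ntfs_inode *ni = NTFS_I(vi);
	unsigned long flags;
	s64 initialized_size;

	/*
	 * readpage/writepage run without i_sem, so they must read the sizes
	 * under ni->size_lock and cope with whatever values they see; the
	 * resizing paths (ntfs_file_write*(), ntfs_truncate()) hold i_sem.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	return initialized_size;
}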
@@ -22,6 +19,68 @@ ToDo/Notes:
	- Enable the code for setting the NT4 compatibility flag when we start
	  making NTFS 1.2 specific modifications.

2.1.25 - (Almost) fully implement write(2) and truncate(2).

	- Change ntfs_map_runlist_nolock(), ntfs_attr_find_vcn_nolock() and
	  {__,}ntfs_cluster_free() to also take an optional attribute search
	  context as argument. This allows calling these functions with the
	  mft record mapped. Update all callers.
	- Fix potential deadlock in ntfs_mft_data_extend_allocation_nolock()
	  error handling by passing in the active search context when calling
	  ntfs_cluster_free().
	- Change ntfs_cluster_alloc() to take an extra boolean parameter
	  specifying whether the clusters are being allocated to extend an
	  attribute or to fill a hole.
	- Change ntfs_attr_make_non_resident() to call ntfs_cluster_alloc()
	  with @is_extension set to TRUE and remove the runlist terminator
	  fixup code as this is now done by ntfs_cluster_alloc().
	- Change ntfs_attr_make_non_resident() to take the attribute value size
	  as an extra parameter. This is needed since we need to know the size
	  before we can map the mft record and our callers always know it. The
	  reason we cannot simply read the size from the vfs inode i_size is
	  that this is not necessarily uptodate. This happens when
	  ntfs_attr_make_non_resident() is called in the ->truncate call path.
	- Fix ntfs_attr_make_non_resident() to update the vfs inode i_blocks
	  which is zero for a resident attribute but should no longer be zero
	  once the attribute is non-resident as it then has real clusters
	  allocated.
	- Add fs/ntfs/attrib.[hc]::ntfs_attr_extend_allocation(), a function to
	  extend the allocation of an attribute. Optionally, the data size,
	  but not the initialized size, can be extended, too.
	- Implement fs/ntfs/inode.[hc]::ntfs_truncate(). It only supports
	  uncompressed and unencrypted files and it never creates sparse files
	  at least for the moment (making a file sparse requires us to modify
	  its directory entries and we do not support directory operations at
	  the moment). Also, support for highly fragmented files, i.e. ones
	  whose data attribute is split across multiple extents, is severely
	  limited. When such a case is encountered, EOPNOTSUPP is returned.
	- Enable ATTR_SIZE attribute changes in ntfs_setattr(). This completes
	  the initial implementation of file truncation. Now both open(2)ing
	  a file with the O_TRUNC flag and the {,f}truncate(2) system calls
	  will resize a file appropriately. The limitations are that only
	  uncompressed and unencrypted files are supported. Also, there is
	  only very limited support for highly fragmented files (the ones whose
	  $DATA attribute is split into multiple attribute extents).
	- In attrib.c::ntfs_attr_set() call balance_dirty_pages_ratelimited()
	  and cond_resched() in the main loop as we could be dirtying a lot of
	  pages and this ensures we play nice with the VM and the system as a
	  whole.
	- Implement file operations ->write, ->aio_write, ->writev for regular
	  files. This replaces the old use of generic_file_write(), et al and
	  the address space operations ->prepare_write and ->commit_write.
	  This means that both sparse and non-sparse (unencrypted and
	  uncompressed) files can now be extended using the normal write(2)
	  code path. There are two limitations at present and these are that
	  we never create sparse files and that we only have limited support
	  for highly fragmented files, i.e. ones whose data attribute is split
	  across multiple extents. When such a case is encountered,
	  EOPNOTSUPP is returned.
	- $EA attributes can be both resident and non-resident.
	- Use %z for size_t to fix compilation warnings. (Andrew Morton)
	- Fix compilation warnings with gcc-4.0.2 on SUSE 10.0.
	- Document extended attribute ($EA) NEED_EA flag. (Based on libntfs
	  patch by Yura Pakhuchiy.)

2.1.24 - Lots of bug fixes and support more clean journal states.

	- Support journals ($LogFile) which have been modified by chkdsk. This
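The ATTR_SIZE entry above is what wires truncation into the VFS. As a rough sketch of that call flow (illustrative only, not the driver's actual ntfs_setattr() code): once a size change is accepted, the generic helper vmtruncate() updates i_size and then invokes the inode operation ->truncate, which for NTFS is now ntfs_truncate().

#include <linux/fs.h>

/* Hypothetical example; the real ntfs_setattr() lives in fs/ntfs/inode.c. */
static int example_setattr_size(struct inode *vi, struct iattr *attr)
{
	int err = 0;

	if (attr->ia_valid & ATTR_SIZE)
		err = vmtruncate(vi, attr->ia_size);	/* calls ->truncate() */
	return err;
}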
@@ -6,7 +6,7 @@ ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
	     index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
	     unistr.o upcase.o

EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.24\"
EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.25\"

ifeq ($(CONFIG_NTFS_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
832	fs/ntfs/aops.c
@@ -1391,8 +1391,7 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
		if (NInoEncrypted(ni)) {
			unlock_page(page);
			BUG_ON(ni->type != AT_DATA);
			ntfs_debug("Denying write access to encrypted "
					"file.");
			ntfs_debug("Denying write access to encrypted file.");
			return -EACCES;
		}
		/* Compressed data streams are handled in compress.c. */
@@ -1508,8 +1507,8 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
		/* Zero out of bounds area in the page cache page. */
		memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		flush_dcache_page(page);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
	/* We are done with the page. */
	end_page_writeback(page);
	/* Finally, mark the mft record dirty, so it gets written back. */
@ -1542,830 +1541,6 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
|
|||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* ntfs_prepare_nonresident_write -
|
||||
*
|
||||
*/
|
||||
static int ntfs_prepare_nonresident_write(struct page *page,
|
||||
unsigned from, unsigned to)
|
||||
{
|
||||
VCN vcn;
|
||||
LCN lcn;
|
||||
s64 initialized_size;
|
||||
loff_t i_size;
|
||||
sector_t block, ablock, iblock;
|
||||
struct inode *vi;
|
||||
ntfs_inode *ni;
|
||||
ntfs_volume *vol;
|
||||
runlist_element *rl;
|
||||
struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
|
||||
unsigned long flags;
|
||||
unsigned int vcn_ofs, block_start, block_end, blocksize;
|
||||
int err;
|
||||
BOOL is_retry;
|
||||
unsigned char blocksize_bits;
|
||||
|
||||
vi = page->mapping->host;
|
||||
ni = NTFS_I(vi);
|
||||
vol = ni->vol;
|
||||
|
||||
ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
|
||||
"0x%lx, from = %u, to = %u.", ni->mft_no, ni->type,
|
||||
page->index, from, to);
|
||||
|
||||
BUG_ON(!NInoNonResident(ni));
|
||||
|
||||
blocksize_bits = vi->i_blkbits;
|
||||
blocksize = 1 << blocksize_bits;
|
||||
|
||||
/*
|
||||
* create_empty_buffers() will create uptodate/dirty buffers if the
|
||||
* page is uptodate/dirty.
|
||||
*/
|
||||
if (!page_has_buffers(page))
|
||||
create_empty_buffers(page, blocksize, 0);
|
||||
bh = head = page_buffers(page);
|
||||
if (unlikely(!bh))
|
||||
return -ENOMEM;
|
||||
|
||||
/* The first block in the page. */
|
||||
block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
|
||||
|
||||
read_lock_irqsave(&ni->size_lock, flags);
|
||||
/*
|
||||
* The first out of bounds block for the allocated size. No need to
|
||||
* round up as allocated_size is in multiples of cluster size and the
|
||||
* minimum cluster size is 512 bytes, which is equal to the smallest
|
||||
* blocksize.
|
||||
*/
|
||||
ablock = ni->allocated_size >> blocksize_bits;
|
||||
i_size = i_size_read(vi);
|
||||
initialized_size = ni->initialized_size;
|
||||
read_unlock_irqrestore(&ni->size_lock, flags);
|
||||
|
||||
/* The last (fully or partially) initialized block. */
|
||||
iblock = initialized_size >> blocksize_bits;
|
||||
|
||||
/* Loop through all the buffers in the page. */
|
||||
block_start = 0;
|
||||
rl = NULL;
|
||||
err = 0;
|
||||
do {
|
||||
block_end = block_start + blocksize;
|
||||
/*
|
||||
* If buffer @bh is outside the write, just mark it uptodate
|
||||
* if the page is uptodate and continue with the next buffer.
|
||||
*/
|
||||
if (block_end <= from || block_start >= to) {
|
||||
if (PageUptodate(page)) {
|
||||
if (!buffer_uptodate(bh))
|
||||
set_buffer_uptodate(bh);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
/*
|
||||
* @bh is at least partially being written to.
|
||||
* Make sure it is not marked as new.
|
||||
*/
|
||||
//if (buffer_new(bh))
|
||||
// clear_buffer_new(bh);
|
||||
|
||||
if (block >= ablock) {
|
||||
// TODO: block is above allocated_size, need to
|
||||
// allocate it. Best done in one go to accommodate not
|
||||
// only block but all above blocks up to and including:
|
||||
// ((page->index << PAGE_CACHE_SHIFT) + to + blocksize
|
||||
// - 1) >> blobksize_bits. Obviously will need to round
|
||||
// up to next cluster boundary, too. This should be
|
||||
// done with a helper function, so it can be reused.
|
||||
ntfs_error(vol->sb, "Writing beyond allocated size "
|
||||
"is not supported yet. Sorry.");
|
||||
err = -EOPNOTSUPP;
|
||||
goto err_out;
|
||||
// Need to update ablock.
|
||||
// Need to set_buffer_new() on all block bhs that are
|
||||
// newly allocated.
|
||||
}
|
||||
/*
|
||||
* Now we have enough allocated size to fulfill the whole
|
||||
* request, i.e. block < ablock is true.
|
||||
*/
|
||||
if (unlikely((block >= iblock) &&
|
||||
(initialized_size < i_size))) {
|
||||
/*
|
||||
* If this page is fully outside initialized size, zero
|
||||
* out all pages between the current initialized size
|
||||
* and the current page. Just use ntfs_readpage() to do
|
||||
* the zeroing transparently.
|
||||
*/
|
||||
if (block > iblock) {
|
||||
// TODO:
|
||||
// For each page do:
|
||||
// - read_cache_page()
|
||||
// Again for each page do:
|
||||
// - wait_on_page_locked()
|
||||
// - Check (PageUptodate(page) &&
|
||||
// !PageError(page))
|
||||
// Update initialized size in the attribute and
|
||||
// in the inode.
|
||||
// Again, for each page do:
|
||||
// __set_page_dirty_buffers();
|
||||
// page_cache_release()
|
||||
// We don't need to wait on the writes.
|
||||
// Update iblock.
|
||||
}
|
||||
/*
|
||||
* The current page straddles initialized size. Zero
|
||||
* all non-uptodate buffers and set them uptodate (and
|
||||
* dirty?). Note, there aren't any non-uptodate buffers
|
||||
* if the page is uptodate.
|
||||
* FIXME: For an uptodate page, the buffers may need to
|
||||
* be written out because they were not initialized on
|
||||
* disk before.
|
||||
*/
|
||||
if (!PageUptodate(page)) {
|
||||
// TODO:
|
||||
// Zero any non-uptodate buffers up to i_size.
|
||||
// Set them uptodate and dirty.
|
||||
}
|
||||
// TODO:
|
||||
// Update initialized size in the attribute and in the
|
||||
// inode (up to i_size).
|
||||
// Update iblock.
|
||||
// FIXME: This is inefficient. Try to batch the two
|
||||
// size changes to happen in one go.
|
||||
ntfs_error(vol->sb, "Writing beyond initialized size "
|
||||
"is not supported yet. Sorry.");
|
||||
err = -EOPNOTSUPP;
|
||||
goto err_out;
|
||||
// Do NOT set_buffer_new() BUT DO clear buffer range
|
||||
// outside write request range.
|
||||
// set_buffer_uptodate() on complete buffers as well as
|
||||
// set_buffer_dirty().
|
||||
}
|
||||
|
||||
/* Need to map unmapped buffers. */
|
||||
if (!buffer_mapped(bh)) {
|
||||
/* Unmapped buffer. Need to map it. */
|
||||
bh->b_bdev = vol->sb->s_bdev;
|
||||
|
||||
/* Convert block into corresponding vcn and offset. */
|
||||
vcn = (VCN)block << blocksize_bits >>
|
||||
vol->cluster_size_bits;
|
||||
vcn_ofs = ((VCN)block << blocksize_bits) &
|
||||
vol->cluster_size_mask;
|
||||
|
||||
is_retry = FALSE;
|
||||
if (!rl) {
|
||||
lock_retry_remap:
|
||||
down_read(&ni->runlist.lock);
|
||||
rl = ni->runlist.rl;
|
||||
}
|
||||
if (likely(rl != NULL)) {
|
||||
/* Seek to element containing target vcn. */
|
||||
while (rl->length && rl[1].vcn <= vcn)
|
||||
rl++;
|
||||
lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
|
||||
} else
|
||||
lcn = LCN_RL_NOT_MAPPED;
|
||||
if (unlikely(lcn < 0)) {
|
||||
/*
|
||||
* We extended the attribute allocation above.
|
||||
* If we hit an ENOENT here it means that the
|
||||
* allocation was insufficient which is a bug.
|
||||
*/
|
||||
BUG_ON(lcn == LCN_ENOENT);
|
||||
|
||||
/* It is a hole, need to instantiate it. */
|
||||
if (lcn == LCN_HOLE) {
|
||||
// TODO: Instantiate the hole.
|
||||
// clear_buffer_new(bh);
|
||||
// unmap_underlying_metadata(bh->b_bdev,
|
||||
// bh->b_blocknr);
|
||||
// For non-uptodate buffers, need to
|
||||
// zero out the region outside the
|
||||
// request in this bh or all bhs,
|
||||
// depending on what we implemented
|
||||
// above.
|
||||
// Need to flush_dcache_page().
|
||||
// Or could use set_buffer_new()
|
||||
// instead?
|
||||
ntfs_error(vol->sb, "Writing into "
|
||||
"sparse regions is "
|
||||
"not supported yet. "
|
||||
"Sorry.");
|
||||
err = -EOPNOTSUPP;
|
||||
if (!rl)
|
||||
up_read(&ni->runlist.lock);
|
||||
goto err_out;
|
||||
} else if (!is_retry &&
|
||||
lcn == LCN_RL_NOT_MAPPED) {
|
||||
is_retry = TRUE;
|
||||
/*
|
||||
* Attempt to map runlist, dropping
|
||||
* lock for the duration.
|
||||
*/
|
||||
up_read(&ni->runlist.lock);
|
||||
err = ntfs_map_runlist(ni, vcn);
|
||||
if (likely(!err))
|
||||
goto lock_retry_remap;
|
||||
rl = NULL;
|
||||
} else if (!rl)
|
||||
up_read(&ni->runlist.lock);
|
||||
/*
|
||||
* Failed to map the buffer, even after
|
||||
* retrying.
|
||||
*/
|
||||
if (!err)
|
||||
err = -EIO;
|
||||
bh->b_blocknr = -1;
|
||||
ntfs_error(vol->sb, "Failed to write to inode "
|
||||
"0x%lx, attribute type 0x%x, "
|
||||
"vcn 0x%llx, offset 0x%x "
|
||||
"because its location on disk "
|
||||
"could not be determined%s "
|
||||
"(error code %i).",
|
||||
ni->mft_no, ni->type,
|
||||
(unsigned long long)vcn,
|
||||
vcn_ofs, is_retry ? " even "
|
||||
"after retrying" : "", err);
|
||||
goto err_out;
|
||||
}
|
||||
/* We now have a successful remap, i.e. lcn >= 0. */
|
||||
|
||||
/* Setup buffer head to correct block. */
|
||||
bh->b_blocknr = ((lcn << vol->cluster_size_bits)
|
||||
+ vcn_ofs) >> blocksize_bits;
|
||||
set_buffer_mapped(bh);
|
||||
|
||||
// FIXME: Something analogous to this is needed for
|
||||
// each newly allocated block, i.e. BH_New.
|
||||
// FIXME: Might need to take this out of the
|
||||
// if (!buffer_mapped(bh)) {}, depending on how we
|
||||
// implement things during the allocated_size and
|
||||
// initialized_size extension code above.
|
||||
if (buffer_new(bh)) {
|
||||
clear_buffer_new(bh);
|
||||
unmap_underlying_metadata(bh->b_bdev,
|
||||
bh->b_blocknr);
|
||||
if (PageUptodate(page)) {
|
||||
set_buffer_uptodate(bh);
|
||||
continue;
|
||||
}
|
||||
/*
|
||||
* Page is _not_ uptodate, zero surrounding
|
||||
* region. NOTE: This is how we decide if to
|
||||
* zero or not!
|
||||
*/
|
||||
if (block_end > to || block_start < from) {
|
||||
void *kaddr;
|
||||
|
||||
kaddr = kmap_atomic(page, KM_USER0);
|
||||
if (block_end > to)
|
||||
memset(kaddr + to, 0,
|
||||
block_end - to);
|
||||
if (block_start < from)
|
||||
memset(kaddr + block_start, 0,
|
||||
from -
|
||||
block_start);
|
||||
flush_dcache_page(page);
|
||||
kunmap_atomic(kaddr, KM_USER0);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
/* @bh is mapped, set it uptodate if the page is uptodate. */
|
||||
if (PageUptodate(page)) {
|
||||
if (!buffer_uptodate(bh))
|
||||
set_buffer_uptodate(bh);
|
||||
continue;
|
||||
}
|
||||
/*
|
||||
* The page is not uptodate. The buffer is mapped. If it is not
|
||||
* uptodate, and it is only partially being written to, we need
|
||||
* to read the buffer in before the write, i.e. right now.
|
||||
*/
|
||||
if (!buffer_uptodate(bh) &&
|
||||
(block_start < from || block_end > to)) {
|
||||
ll_rw_block(READ, 1, &bh);
|
||||
*wait_bh++ = bh;
|
||||
}
|
||||
} while (block++, block_start = block_end,
|
||||
(bh = bh->b_this_page) != head);
|
||||
|
||||
/* Release the lock if we took it. */
|
||||
if (rl) {
|
||||
up_read(&ni->runlist.lock);
|
||||
rl = NULL;
|
||||
}
|
||||
|
||||
/* If we issued read requests, let them complete. */
|
||||
while (wait_bh > wait) {
|
||||
wait_on_buffer(*--wait_bh);
|
||||
if (!buffer_uptodate(*wait_bh))
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
ntfs_debug("Done.");
|
||||
return 0;
|
||||
err_out:
|
||||
/*
|
||||
* Zero out any newly allocated blocks to avoid exposing stale data.
|
||||
* If BH_New is set, we know that the block was newly allocated in the
|
||||
* above loop.
|
||||
* FIXME: What about initialized_size increments? Have we done all the
|
||||
* required zeroing above? If not this error handling is broken, and
|
||||
* in particular the if (block_end <= from) check is completely bogus.
|
||||
*/
|
||||
bh = head;
|
||||
block_start = 0;
|
||||
is_retry = FALSE;
|
||||
do {
|
||||
block_end = block_start + blocksize;
|
||||
if (block_end <= from)
|
||||
continue;
|
||||
if (block_start >= to)
|
||||
break;
|
||||
if (buffer_new(bh)) {
|
||||
void *kaddr;
|
||||
|
||||
clear_buffer_new(bh);
|
||||
kaddr = kmap_atomic(page, KM_USER0);
|
||||
memset(kaddr + block_start, 0, bh->b_size);
|
||||
kunmap_atomic(kaddr, KM_USER0);
|
||||
set_buffer_uptodate(bh);
|
||||
mark_buffer_dirty(bh);
|
||||
is_retry = TRUE;
|
||||
}
|
||||
} while (block_start = block_end, (bh = bh->b_this_page) != head);
|
||||
if (is_retry)
|
||||
flush_dcache_page(page);
|
||||
if (rl)
|
||||
up_read(&ni->runlist.lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* ntfs_prepare_write - prepare a page for receiving data
|
||||
*
|
||||
* This is called from generic_file_write() with i_sem held on the inode
|
||||
* (@page->mapping->host). The @page is locked but not kmap()ped. The source
|
||||
* data has not yet been copied into the @page.
|
||||
*
|
||||
* Need to extend the attribute/fill in holes if necessary, create blocks and
|
||||
* make partially overwritten blocks uptodate,
|
||||
*
|
||||
* i_size is not to be modified yet.
|
||||
*
|
||||
* Return 0 on success or -errno on error.
|
||||
*
|
||||
* Should be using block_prepare_write() [support for sparse files] or
|
||||
* cont_prepare_write() [no support for sparse files]. Cannot do that due to
|
||||
* ntfs specifics but can look at them for implementation guidance.
|
||||
*
|
||||
* Note: In the range, @from is inclusive and @to is exclusive, i.e. @from is
|
||||
* the first byte in the page that will be written to and @to is the first byte
|
||||
* after the last byte that will be written to.
|
||||
*/
|
||||
static int ntfs_prepare_write(struct file *file, struct page *page,
|
||||
unsigned from, unsigned to)
|
||||
{
|
||||
s64 new_size;
|
||||
loff_t i_size;
|
||||
struct inode *vi = page->mapping->host;
|
||||
ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
|
||||
ntfs_volume *vol = ni->vol;
|
||||
ntfs_attr_search_ctx *ctx = NULL;
|
||||
MFT_RECORD *m = NULL;
|
||||
ATTR_RECORD *a;
|
||||
u8 *kaddr;
|
||||
u32 attr_len;
|
||||
int err;
|
||||
|
||||
ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
|
||||
"0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
|
||||
page->index, from, to);
|
||||
BUG_ON(!PageLocked(page));
|
||||
BUG_ON(from > PAGE_CACHE_SIZE);
|
||||
BUG_ON(to > PAGE_CACHE_SIZE);
|
||||
BUG_ON(from > to);
|
||||
BUG_ON(NInoMstProtected(ni));
|
||||
/*
|
||||
* If a previous ntfs_truncate() failed, repeat it and abort if it
|
||||
* fails again.
|
||||
*/
|
||||
if (unlikely(NInoTruncateFailed(ni))) {
|
||||
down_write(&vi->i_alloc_sem);
|
||||
err = ntfs_truncate(vi);
|
||||
up_write(&vi->i_alloc_sem);
|
||||
if (err || NInoTruncateFailed(ni)) {
|
||||
if (!err)
|
||||
err = -EIO;
|
||||
goto err_out;
|
||||
}
|
||||
}
|
||||
/* If the attribute is not resident, deal with it elsewhere. */
|
||||
if (NInoNonResident(ni)) {
|
||||
/*
|
||||
* Only unnamed $DATA attributes can be compressed, encrypted,
|
||||
* and/or sparse.
|
||||
*/
|
||||
if (ni->type == AT_DATA && !ni->name_len) {
|
||||
/* If file is encrypted, deny access, just like NT4. */
|
||||
if (NInoEncrypted(ni)) {
|
||||
ntfs_debug("Denying write access to encrypted "
|
||||
"file.");
|
||||
return -EACCES;
|
||||
}
|
||||
/* Compressed data streams are handled in compress.c. */
|
||||
if (NInoCompressed(ni)) {
|
||||
// TODO: Implement and replace this check with
|
||||
// return ntfs_write_compressed_block(page);
|
||||
ntfs_error(vi->i_sb, "Writing to compressed "
|
||||
"files is not supported yet. "
|
||||
"Sorry.");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
// TODO: Implement and remove this check.
|
||||
if (NInoSparse(ni)) {
|
||||
ntfs_error(vi->i_sb, "Writing to sparse files "
|
||||
"is not supported yet. Sorry.");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
/* Normal data stream. */
|
||||
return ntfs_prepare_nonresident_write(page, from, to);
|
||||
}
|
||||
/*
|
||||
* Attribute is resident, implying it is not compressed, encrypted, or
|
||||
* sparse.
|
||||
*/
|
||||
BUG_ON(page_has_buffers(page));
|
||||
new_size = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
|
||||
/* If we do not need to resize the attribute allocation we are done. */
|
||||
if (new_size <= i_size_read(vi))
|
||||
goto done;
|
||||
/* Map, pin, and lock the (base) mft record. */
|
||||
if (!NInoAttr(ni))
|
||||
base_ni = ni;
|
||||
else
|
||||
base_ni = ni->ext.base_ntfs_ino;
|
||||
m = map_mft_record(base_ni);
|
||||
if (IS_ERR(m)) {
|
||||
err = PTR_ERR(m);
|
||||
m = NULL;
|
||||
ctx = NULL;
|
||||
goto err_out;
|
||||
}
|
||||
ctx = ntfs_attr_get_search_ctx(base_ni, m);
|
||||
if (unlikely(!ctx)) {
|
||||
err = -ENOMEM;
|
||||
goto err_out;
|
||||
}
|
||||
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
|
||||
CASE_SENSITIVE, 0, NULL, 0, ctx);
|
||||
if (unlikely(err)) {
|
||||
if (err == -ENOENT)
|
||||
err = -EIO;
|
||||
goto err_out;
|
||||
}
|
||||
m = ctx->mrec;
|
||||
a = ctx->attr;
|
||||
/* The total length of the attribute value. */
|
||||
attr_len = le32_to_cpu(a->data.resident.value_length);
|
||||
/* Fix an eventual previous failure of ntfs_commit_write(). */
|
||||
i_size = i_size_read(vi);
|
||||
if (unlikely(attr_len > i_size)) {
|
||||
attr_len = i_size;
|
||||
a->data.resident.value_length = cpu_to_le32(attr_len);
|
||||
}
|
||||
/* If we do not need to resize the attribute allocation we are done. */
|
||||
if (new_size <= attr_len)
|
||||
goto done_unm;
|
||||
/* Check if new size is allowed in $AttrDef. */
|
||||
err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
|
||||
if (unlikely(err)) {
|
||||
if (err == -ERANGE) {
|
||||
ntfs_error(vol->sb, "Write would cause the inode "
|
||||
"0x%lx to exceed the maximum size for "
|
||||
"its attribute type (0x%x). Aborting "
|
||||
"write.", vi->i_ino,
|
||||
le32_to_cpu(ni->type));
|
||||
} else {
|
||||
ntfs_error(vol->sb, "Inode 0x%lx has unknown "
|
||||
"attribute type 0x%x. Aborting "
|
||||
"write.", vi->i_ino,
|
||||
le32_to_cpu(ni->type));
|
||||
err = -EIO;
|
||||
}
|
||||
goto err_out2;
|
||||
}
|
||||
/*
|
||||
* Extend the attribute record to be able to store the new attribute
|
||||
* size.
|
||||
*/
|
||||
if (new_size >= vol->mft_record_size || ntfs_attr_record_resize(m, a,
|
||||
le16_to_cpu(a->data.resident.value_offset) +
|
||||
new_size)) {
|
||||
/* Not enough space in the mft record. */
|
||||
ntfs_error(vol->sb, "Not enough space in the mft record for "
|
||||
"the resized attribute value. This is not "
|
||||
"supported yet. Aborting write.");
|
||||
err = -EOPNOTSUPP;
|
||||
goto err_out2;
|
||||
}
|
||||
/*
|
||||
* We have enough space in the mft record to fit the write. This
|
||||
* implies the attribute is smaller than the mft record and hence the
|
||||
* attribute must be in a single page and hence page->index must be 0.
|
||||
*/
|
||||
BUG_ON(page->index);
|
||||
/*
|
||||
* If the beginning of the write is past the old size, enlarge the
|
||||
* attribute value up to the beginning of the write and fill it with
|
||||
* zeroes.
|
||||
*/
|
||||
if (from > attr_len) {
|
||||
memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
|
||||
attr_len, 0, from - attr_len);
|
||||
a->data.resident.value_length = cpu_to_le32(from);
|
||||
/* Zero the corresponding area in the page as well. */
|
||||
if (PageUptodate(page)) {
|
||||
kaddr = kmap_atomic(page, KM_USER0);
|
||||
memset(kaddr + attr_len, 0, from - attr_len);
|
||||
kunmap_atomic(kaddr, KM_USER0);
|
||||
flush_dcache_page(page);
|
||||
}
|
||||
}
|
||||
flush_dcache_mft_record_page(ctx->ntfs_ino);
|
||||
mark_mft_record_dirty(ctx->ntfs_ino);
|
||||
done_unm:
|
||||
ntfs_attr_put_search_ctx(ctx);
|
||||
unmap_mft_record(base_ni);
|
||||
/*
|
||||
* Because resident attributes are handled by memcpy() to/from the
|
||||
* corresponding MFT record, and because this form of i/o is byte
|
||||
* aligned rather than block aligned, there is no need to bring the
|
||||
* page uptodate here as in the non-resident case where we need to
|
||||
* bring the buffers straddled by the write uptodate before
|
||||
* generic_file_write() does the copying from userspace.
|
||||
*
|
||||
* We thus defer the uptodate bringing of the page region outside the
|
||||
* region written to to ntfs_commit_write(), which makes the code
|
||||
* simpler and saves one atomic kmap which is good.
|
||||
*/
|
||||
done:
|
||||
ntfs_debug("Done.");
|
||||
return 0;
|
||||
err_out:
|
||||
if (err == -ENOMEM)
|
||||
ntfs_warning(vi->i_sb, "Error allocating memory required to "
|
||||
"prepare the write.");
|
||||
else {
|
||||
ntfs_error(vi->i_sb, "Resident attribute prepare write failed "
|
||||
"with error %i.", err);
|
||||
NVolSetErrors(vol);
|
||||
make_bad_inode(vi);
|
||||
}
|
||||
err_out2:
|
||||
if (ctx)
|
||||
ntfs_attr_put_search_ctx(ctx);
|
||||
if (m)
|
||||
unmap_mft_record(base_ni);
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* ntfs_commit_nonresident_write -
|
||||
*
|
||||
*/
|
||||
static int ntfs_commit_nonresident_write(struct page *page,
|
||||
unsigned from, unsigned to)
|
||||
{
|
||||
s64 pos = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
|
||||
struct inode *vi = page->mapping->host;
|
||||
struct buffer_head *bh, *head;
|
||||
unsigned int block_start, block_end, blocksize;
|
||||
BOOL partial;
|
||||
|
||||
ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
|
||||
"0x%lx, from = %u, to = %u.", vi->i_ino,
|
||||
NTFS_I(vi)->type, page->index, from, to);
|
||||
blocksize = 1 << vi->i_blkbits;
|
||||
|
||||
// FIXME: We need a whole slew of special cases in here for compressed
|
||||
// files for example...
|
||||
// For now, we know ntfs_prepare_write() would have failed so we can't
|
||||
// get here in any of the cases which we have to special case, so we
|
||||
// are just a ripped off, unrolled generic_commit_write().
|
||||
|
||||
bh = head = page_buffers(page);
|
||||
block_start = 0;
|
||||
partial = FALSE;
|
||||
do {
|
||||
block_end = block_start + blocksize;
|
||||
if (block_end <= from || block_start >= to) {
|
||||
if (!buffer_uptodate(bh))
|
||||
partial = TRUE;
|
||||
} else {
|
||||
set_buffer_uptodate(bh);
|
||||
mark_buffer_dirty(bh);
|
||||
}
|
||||
} while (block_start = block_end, (bh = bh->b_this_page) != head);
|
||||
/*
|
||||
* If this is a partial write which happened to make all buffers
|
||||
* uptodate then we can optimize away a bogus ->readpage() for the next
|
||||
* read(). Here we 'discover' whether the page went uptodate as a
|
||||
* result of this (potentially partial) write.
|
||||
*/
|
||||
if (!partial)
|
||||
SetPageUptodate(page);
|
||||
/*
|
||||
* Not convinced about this at all. See disparity comment above. For
|
||||
* now we know ntfs_prepare_write() would have failed in the write
|
||||
* exceeds i_size case, so this will never trigger which is fine.
|
||||
*/
|
||||
if (pos > i_size_read(vi)) {
|
||||
ntfs_error(vi->i_sb, "Writing beyond the existing file size is "
|
||||
"not supported yet. Sorry.");
|
||||
return -EOPNOTSUPP;
|
||||
// vi->i_size = pos;
|
||||
// mark_inode_dirty(vi);
|
||||
}
|
||||
ntfs_debug("Done.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ntfs_commit_write - commit the received data
|
||||
*
|
||||
* This is called from generic_file_write() with i_sem held on the inode
|
||||
* (@page->mapping->host). The @page is locked but not kmap()ped. The source
|
||||
* data has already been copied into the @page. ntfs_prepare_write() has been
|
||||
* called before the data copied and it returned success so we can take the
|
||||
* results of various BUG checks and some error handling for granted.
|
||||
*
|
||||
* Need to mark modified blocks dirty so they get written out later when
|
||||
* ntfs_writepage() is invoked by the VM.
|
||||
*
|
||||
* Return 0 on success or -errno on error.
|
||||
*
|
||||
* Should be using generic_commit_write(). This marks buffers uptodate and
|
||||
* dirty, sets the page uptodate if all buffers in the page are uptodate, and
|
||||
* updates i_size if the end of io is beyond i_size. In that case, it also
|
||||
* marks the inode dirty.
|
||||
*
|
||||
* Cannot use generic_commit_write() due to ntfs specialities but can look at
|
||||
* it for implementation guidance.
|
||||
*
|
||||
* If things have gone as outlined in ntfs_prepare_write(), then we do not
|
||||
* need to do any page content modifications here at all, except in the write
|
||||
* to resident attribute case, where we need to do the uptodate bringing here
|
||||
* which we combine with the copying into the mft record which means we save
|
||||
* one atomic kmap.
|
||||
*/
|
||||
static int ntfs_commit_write(struct file *file, struct page *page,
|
||||
unsigned from, unsigned to)
|
||||
{
|
||||
struct inode *vi = page->mapping->host;
|
||||
ntfs_inode *base_ni, *ni = NTFS_I(vi);
|
||||
char *kaddr, *kattr;
|
||||
ntfs_attr_search_ctx *ctx;
|
||||
MFT_RECORD *m;
|
||||
ATTR_RECORD *a;
|
||||
u32 attr_len;
|
||||
int err;
|
||||
|
||||
ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
|
||||
"0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
|
||||
page->index, from, to);
|
||||
/* If the attribute is not resident, deal with it elsewhere. */
|
||||
if (NInoNonResident(ni)) {
|
||||
/* Only unnamed $DATA attributes can be compressed/encrypted. */
|
||||
if (ni->type == AT_DATA && !ni->name_len) {
|
||||
/* Encrypted files need separate handling. */
|
||||
if (NInoEncrypted(ni)) {
|
||||
// We never get here at present!
|
||||
BUG();
|
||||
}
|
||||
/* Compressed data streams are handled in compress.c. */
|
||||
if (NInoCompressed(ni)) {
|
||||
// TODO: Implement this!
|
||||
// return ntfs_write_compressed_block(page);
|
||||
// We never get here at present!
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
/* Normal data stream. */
|
||||
return ntfs_commit_nonresident_write(page, from, to);
|
||||
}
|
||||
/*
|
||||
* Attribute is resident, implying it is not compressed, encrypted, or
|
||||
* sparse.
|
||||
*/
|
||||
if (!NInoAttr(ni))
|
||||
base_ni = ni;
|
||||
else
|
||||
base_ni = ni->ext.base_ntfs_ino;
|
||||
/* Map, pin, and lock the mft record. */
|
||||
m = map_mft_record(base_ni);
|
||||
if (IS_ERR(m)) {
|
||||
err = PTR_ERR(m);
|
||||
m = NULL;
|
||||
ctx = NULL;
|
||||
goto err_out;
|
||||
}
|
||||
ctx = ntfs_attr_get_search_ctx(base_ni, m);
|
||||
if (unlikely(!ctx)) {
|
||||
err = -ENOMEM;
|
||||
goto err_out;
|
||||
}
|
||||
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
|
||||
CASE_SENSITIVE, 0, NULL, 0, ctx);
|
||||
if (unlikely(err)) {
|
||||
if (err == -ENOENT)
|
||||
err = -EIO;
|
||||
goto err_out;
|
||||
}
|
||||
a = ctx->attr;
|
||||
/* The total length of the attribute value. */
|
||||
attr_len = le32_to_cpu(a->data.resident.value_length);
|
||||
BUG_ON(from > attr_len);
|
||||
kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
|
||||
kaddr = kmap_atomic(page, KM_USER0);
|
||||
/* Copy the received data from the page to the mft record. */
|
||||
memcpy(kattr + from, kaddr + from, to - from);
|
||||
/* Update the attribute length if necessary. */
|
||||
if (to > attr_len) {
|
||||
attr_len = to;
|
||||
a->data.resident.value_length = cpu_to_le32(attr_len);
|
||||
}
|
||||
/*
|
||||
* If the page is not uptodate, bring the out of bounds area(s)
|
||||
* uptodate by copying data from the mft record to the page.
|
||||
*/
|
||||
if (!PageUptodate(page)) {
|
||||
if (from > 0)
|
||||
memcpy(kaddr, kattr, from);
|
||||
if (to < attr_len)
|
||||
memcpy(kaddr + to, kattr + to, attr_len - to);
|
||||
/* Zero the region outside the end of the attribute value. */
|
||||
if (attr_len < PAGE_CACHE_SIZE)
|
||||
memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
|
||||
/*
|
||||
* The probability of not having done any of the above is
|
||||
* extremely small, so we just flush unconditionally.
|
||||
*/
|
||||
flush_dcache_page(page);
|
||||
SetPageUptodate(page);
|
||||
}
|
||||
kunmap_atomic(kaddr, KM_USER0);
|
||||
/* Update i_size if necessary. */
|
||||
if (i_size_read(vi) < attr_len) {
|
||||
unsigned long flags;
|
||||
|
||||
write_lock_irqsave(&ni->size_lock, flags);
|
||||
ni->allocated_size = ni->initialized_size = attr_len;
|
||||
i_size_write(vi, attr_len);
|
||||
write_unlock_irqrestore(&ni->size_lock, flags);
|
||||
}
|
||||
/* Mark the mft record dirty, so it gets written back. */
|
||||
flush_dcache_mft_record_page(ctx->ntfs_ino);
|
||||
mark_mft_record_dirty(ctx->ntfs_ino);
|
||||
ntfs_attr_put_search_ctx(ctx);
|
||||
unmap_mft_record(base_ni);
|
||||
ntfs_debug("Done.");
|
||||
return 0;
|
||||
err_out:
|
||||
if (err == -ENOMEM) {
|
||||
ntfs_warning(vi->i_sb, "Error allocating memory required to "
|
||||
"commit the write.");
|
||||
if (PageUptodate(page)) {
|
||||
ntfs_warning(vi->i_sb, "Page is uptodate, setting "
|
||||
"dirty so the write will be retried "
|
||||
"later on by the VM.");
|
||||
/*
|
||||
* Put the page on mapping->dirty_pages, but leave its
|
||||
* buffers' dirty state as-is.
|
||||
*/
|
||||
__set_page_dirty_nobuffers(page);
|
||||
err = 0;
|
||||
} else
|
||||
ntfs_error(vi->i_sb, "Page is not uptodate. Written "
|
||||
"data has been lost.");
|
||||
} else {
|
||||
ntfs_error(vi->i_sb, "Resident attribute commit write failed "
|
||||
"with error %i.", err);
|
||||
NVolSetErrors(ni->vol);
|
||||
make_bad_inode(vi);
|
||||
}
|
||||
if (ctx)
|
||||
ntfs_attr_put_search_ctx(ctx);
|
||||
if (m)
|
||||
unmap_mft_record(base_ni);
|
||||
return err;
|
||||
}
|
||||
|
||||
#endif /* NTFS_RW */
|
||||
|
||||
/**
|
||||
|
@@ -2377,9 +1552,6 @@ struct address_space_operations ntfs_aops = {
						   disk request queue. */
#ifdef NTFS_RW
	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
	.prepare_write	= ntfs_prepare_write,	/* Prepare page and buffers
						   ready to receive data. */
	.commit_write	= ntfs_commit_write,	/* Commit received data. */
#endif /* NTFS_RW */
};
981	fs/ntfs/attrib.c
File diff suppressed because it is too large
@@ -60,14 +60,15 @@ typedef struct {
	ATTR_RECORD *base_attr;
} ntfs_attr_search_ctx;

extern int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn);
extern int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn,
		ntfs_attr_search_ctx *ctx);
extern int ntfs_map_runlist(ntfs_inode *ni, VCN vcn);

extern LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
		const BOOL write_locked);

extern runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni,
		const VCN vcn, const BOOL write_locked);
		const VCN vcn, ntfs_attr_search_ctx *ctx);

int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
		const u32 name_len, const IGNORE_CASE_BOOL ic,
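The extra ntfs_attr_search_ctx parameter added above is optional. A minimal, hypothetical caller follows, assuming (per the ChangeLog entry in this commit) that NULL may be passed when the caller has not already mapped the mft record, and that the _nolock suffix means the caller already holds ni->runlist.lock for writing.

/* Illustrative only; not part of the driver. */
static int example_map_vcn(ntfs_inode *ni, VCN vcn)
{
	/* No mft record is mapped here, so no search context is supplied. */
	return ntfs_map_runlist_nolock(ni, vcn, NULL);
}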
@@ -102,7 +103,10 @@ extern int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size);
extern int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
		const u32 new_size);

extern int ntfs_attr_make_non_resident(ntfs_inode *ni);
extern int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size);

extern s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
		const s64 new_data_size, const s64 data_start);

extern int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt,
		const u8 val);
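The new ntfs_attr_extend_allocation() declared above is what ntfs_truncate() (later in this commit) uses to grow a file. A condensed, illustrative caller follows, using the convention visible at that call site of passing -1 for a size that should be left alone, and assuming a negative s64 return value is -errno.

/* Illustrative only; not part of the driver. */
static int example_extend_attribute(ntfs_inode *ni, s64 new_size)
{
	s64 ret;

	/* Grow both the allocated size and the data size to new_size. */
	ret = ntfs_attr_extend_allocation(ni, new_size, new_size, -1);
	if (ret < 0)
		return (int)ret;	/* assumed -errno on failure */
	return 0;
}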
2255	fs/ntfs/file.c
File diff suppressed because it is too large
520	fs/ntfs/inode.c
@@ -30,6 +30,7 @@
#include "debug.h"
#include "inode.h"
#include "attrib.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "time.h"
@@ -2291,11 +2292,16 @@ int ntfs_show_options(struct seq_file *sf, struct vfsmount *mnt)

#ifdef NTFS_RW

static const char *es = " Leaving inconsistent metadata. Unmount and run "
		"chkdsk.";

/**
 * ntfs_truncate - called when the i_size of an ntfs inode is changed
 * @vi:		inode for which the i_size was changed
 *
 * We do not support i_size changes yet.
 * We only support i_size changes for normal files at present, i.e. not
 * compressed and not encrypted. This is enforced in ntfs_setattr(), see
 * below.
 *
 * The kernel guarantees that @vi is a regular file (S_ISREG() is true) and
 * that the change is allowed.
@ -2306,80 +2312,499 @@ int ntfs_show_options(struct seq_file *sf, struct vfsmount *mnt)
|
|||
* Returns 0 on success or -errno on error.
|
||||
*
|
||||
* Called with ->i_sem held. In all but one case ->i_alloc_sem is held for
|
||||
* writing. The only case where ->i_alloc_sem is not held is
|
||||
* writing. The only case in the kernel where ->i_alloc_sem is not held is
|
||||
* mm/filemap.c::generic_file_buffered_write() where vmtruncate() is called
|
||||
* with the current i_size as the offset which means that it is a noop as far
|
||||
* as ntfs_truncate() is concerned.
|
||||
* with the current i_size as the offset. The analogous place in NTFS is in
|
||||
* fs/ntfs/file.c::ntfs_file_buffered_write() where we call vmtruncate() again
|
||||
* without holding ->i_alloc_sem.
|
||||
*/
|
||||
int ntfs_truncate(struct inode *vi)
|
||||
{
|
||||
ntfs_inode *ni = NTFS_I(vi);
|
||||
s64 new_size, old_size, nr_freed, new_alloc_size, old_alloc_size;
|
||||
VCN highest_vcn;
|
||||
unsigned long flags;
|
||||
ntfs_inode *base_ni, *ni = NTFS_I(vi);
|
||||
ntfs_volume *vol = ni->vol;
|
||||
ntfs_attr_search_ctx *ctx;
|
||||
MFT_RECORD *m;
|
||||
ATTR_RECORD *a;
|
||||
const char *te = " Leaving file length out of sync with i_size.";
|
||||
int err;
|
||||
int err, mp_size, size_change, alloc_change;
|
||||
u32 attr_len;
|
||||
|
||||
ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
|
||||
BUG_ON(NInoAttr(ni));
|
||||
BUG_ON(S_ISDIR(vi->i_mode));
|
||||
BUG_ON(NInoMstProtected(ni));
|
||||
BUG_ON(ni->nr_extents < 0);
|
||||
m = map_mft_record(ni);
|
||||
retry_truncate:
|
||||
/*
|
||||
* Lock the runlist for writing and map the mft record to ensure it is
|
||||
* safe to mess with the attribute runlist and sizes.
|
||||
*/
|
||||
down_write(&ni->runlist.lock);
|
||||
if (!NInoAttr(ni))
|
||||
base_ni = ni;
|
||||
else
|
||||
base_ni = ni->ext.base_ntfs_ino;
|
||||
m = map_mft_record(base_ni);
|
||||
if (IS_ERR(m)) {
|
||||
err = PTR_ERR(m);
|
||||
ntfs_error(vi->i_sb, "Failed to map mft record for inode 0x%lx "
|
||||
"(error code %d).%s", vi->i_ino, err, te);
|
||||
ctx = NULL;
|
||||
m = NULL;
|
||||
goto err_out;
|
||||
goto old_bad_out;
|
||||
}
|
||||
ctx = ntfs_attr_get_search_ctx(ni, m);
|
||||
ctx = ntfs_attr_get_search_ctx(base_ni, m);
|
||||
if (unlikely(!ctx)) {
|
||||
ntfs_error(vi->i_sb, "Failed to allocate a search context for "
|
||||
"inode 0x%lx (not enough memory).%s",
|
||||
vi->i_ino, te);
|
||||
err = -ENOMEM;
|
||||
goto err_out;
|
||||
goto old_bad_out;
|
||||
}
|
||||
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
|
||||
CASE_SENSITIVE, 0, NULL, 0, ctx);
|
||||
if (unlikely(err)) {
|
||||
if (err == -ENOENT)
|
||||
if (err == -ENOENT) {
|
||||
ntfs_error(vi->i_sb, "Open attribute is missing from "
|
||||
"mft record. Inode 0x%lx is corrupt. "
|
||||
"Run chkdsk.", vi->i_ino);
|
||||
else
|
||||
"Run chkdsk.%s", vi->i_ino, te);
|
||||
err = -EIO;
|
||||
} else
|
||||
ntfs_error(vi->i_sb, "Failed to lookup attribute in "
|
||||
"inode 0x%lx (error code %d).",
|
||||
vi->i_ino, err);
|
||||
"inode 0x%lx (error code %d).%s",
|
||||
vi->i_ino, err, te);
|
||||
goto old_bad_out;
|
||||
}
|
||||
m = ctx->mrec;
|
||||
a = ctx->attr;
|
||||
/*
|
||||
* The i_size of the vfs inode is the new size for the attribute value.
|
||||
*/
|
||||
new_size = i_size_read(vi);
|
||||
/* The current size of the attribute value is the old size. */
|
||||
old_size = ntfs_attr_size(a);
|
||||
/* Calculate the new allocated size. */
|
||||
if (NInoNonResident(ni))
|
||||
new_alloc_size = (new_size + vol->cluster_size - 1) &
|
||||
~(s64)vol->cluster_size_mask;
|
||||
else
|
||||
new_alloc_size = (new_size + 7) & ~7;
|
||||
/* The current allocated size is the old allocated size. */
|
||||
read_lock_irqsave(&ni->size_lock, flags);
|
||||
old_alloc_size = ni->allocated_size;
|
||||
read_unlock_irqrestore(&ni->size_lock, flags);
|
||||
/*
|
||||
* The change in the file size. This will be 0 if no change, >0 if the
|
||||
* size is growing, and <0 if the size is shrinking.
|
||||
*/
|
||||
size_change = -1;
|
||||
if (new_size - old_size >= 0) {
|
||||
size_change = 1;
|
||||
if (new_size == old_size)
|
||||
size_change = 0;
|
||||
}
|
||||
/* As above for the allocated size. */
|
||||
alloc_change = -1;
|
||||
if (new_alloc_size - old_alloc_size >= 0) {
|
||||
alloc_change = 1;
|
||||
if (new_alloc_size == old_alloc_size)
|
||||
alloc_change = 0;
|
||||
}
|
||||
/*
|
||||
* If neither the size nor the allocation are being changed there is
|
||||
* nothing to do.
|
||||
*/
|
||||
if (!size_change && !alloc_change)
|
||||
goto unm_done;
|
||||
/* If the size is changing, check if new size is allowed in $AttrDef. */
|
||||
if (size_change) {
|
||||
err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
|
||||
if (unlikely(err)) {
|
||||
if (err == -ERANGE) {
|
||||
ntfs_error(vol->sb, "Truncate would cause the "
|
||||
"inode 0x%lx to %simum size "
|
||||
"for its attribute type "
|
||||
"(0x%x). Aborting truncate.",
|
||||
vi->i_ino,
|
||||
new_size > old_size ? "exceed "
|
||||
"the max" : "go under the min",
|
||||
le32_to_cpu(ni->type));
|
||||
err = -EFBIG;
|
||||
} else {
|
||||
ntfs_error(vol->sb, "Inode 0x%lx has unknown "
|
||||
"attribute type 0x%x. "
|
||||
"Aborting truncate.",
|
||||
vi->i_ino,
|
||||
le32_to_cpu(ni->type));
|
||||
err = -EIO;
|
||||
}
|
||||
/* Reset the vfs inode size to the old size. */
|
||||
i_size_write(vi, old_size);
|
||||
goto err_out;
|
||||
}
|
||||
}
|
||||
if (NInoCompressed(ni) || NInoEncrypted(ni)) {
|
||||
ntfs_warning(vi->i_sb, "Changes in inode size are not "
|
||||
"supported yet for %s files, ignoring.",
|
||||
NInoCompressed(ni) ? "compressed" :
|
||||
"encrypted");
|
||||
err = -EOPNOTSUPP;
|
||||
goto bad_out;
|
||||
}
|
||||
if (a->non_resident)
|
||||
goto do_non_resident_truncate;
|
||||
BUG_ON(NInoNonResident(ni));
|
||||
/* Resize the attribute record to best fit the new attribute size. */
|
||||
if (new_size < vol->mft_record_size &&
|
||||
!ntfs_resident_attr_value_resize(m, a, new_size)) {
|
||||
unsigned long flags;
|
||||
|
||||
/* The resize succeeded! */
|
||||
flush_dcache_mft_record_page(ctx->ntfs_ino);
|
||||
mark_mft_record_dirty(ctx->ntfs_ino);
|
||||
write_lock_irqsave(&ni->size_lock, flags);
|
||||
/* Update the sizes in the ntfs inode and all is done. */
|
||||
ni->allocated_size = le32_to_cpu(a->length) -
|
||||
le16_to_cpu(a->data.resident.value_offset);
|
||||
/*
|
||||
* Note ntfs_resident_attr_value_resize() has already done any
|
||||
* necessary data clearing in the attribute record. When the
|
||||
* file is being shrunk vmtruncate() will already have cleared
|
||||
* the top part of the last partial page, i.e. since this is
|
||||
* the resident case this is the page with index 0. However,
|
||||
* when the file is being expanded, the page cache page data
|
||||
* between the old data_size, i.e. old_size, and the new_size
|
||||
* has not been zeroed. Fortunately, we do not need to zero it
|
||||
* either since on one hand it will either already be zero due
|
||||
* to both readpage and writepage clearing partial page data
|
||||
* beyond i_size in which case there is nothing to do or in the
|
||||
* case of the file being mmap()ped at the same time, POSIX
|
||||
* specifies that the behaviour is unspecified thus we do not
|
||||
* have to do anything. This means that in our implementation
|
||||
* in the rare case that the file is mmap()ped and a write
|
||||
* occured into the mmap()ped region just beyond the file size
|
||||
* and writepage has not yet been called to write out the page
|
||||
* (which would clear the area beyond the file size) and we now
|
||||
* extend the file size to incorporate this dirty region
|
||||
* outside the file size, a write of the page would result in
|
||||
* this data being written to disk instead of being cleared.
|
||||
* Given both POSIX and the Linux mmap(2) man page specify that
|
||||
* this corner case is undefined, we choose to leave it like
|
||||
* that as this is much simpler for us as we cannot lock the
|
||||
* relevant page now since we are holding too many ntfs locks
|
||||
* which would result in a lock reversal deadlock.
|
||||
*/
|
||||
ni->initialized_size = new_size;
|
||||
write_unlock_irqrestore(&ni->size_lock, flags);
|
||||
goto unm_done;
|
||||
}
|
||||
/* If the above resize failed, this must be an attribute extension. */
|
||||
BUG_ON(size_change < 0);
|
||||
/*
|
||||
* We have to drop all the locks so we can call
|
||||
* ntfs_attr_make_non_resident(). This could be optimised by try-
|
||||
* locking the first page cache page and only if that fails dropping
|
||||
* the locks, locking the page, and redoing all the locking and
|
||||
* lookups. While this would be a huge optimisation, it is not worth
|
||||
* it as this is definitely a slow code path as it only ever can happen
|
||||
* once for any given file.
|
||||
*/
|
||||
ntfs_attr_put_search_ctx(ctx);
|
||||
unmap_mft_record(base_ni);
|
||||
up_write(&ni->runlist.lock);
|
||||
/*
|
||||
* Not enough space in the mft record, try to make the attribute
|
||||
* non-resident and if successful restart the truncation process.
|
||||
*/
|
||||
err = ntfs_attr_make_non_resident(ni, old_size);
|
||||
if (likely(!err))
|
||||
goto retry_truncate;
|
||||
/*
|
||||
* Could not make non-resident. If this is due to this not being
|
||||
* permitted for this attribute type or there not being enough space,
|
||||
* try to make other attributes non-resident. Otherwise fail.
|
||||
*/
|
||||
if (unlikely(err != -EPERM && err != -ENOSPC)) {
|
||||
ntfs_error(vol->sb, "Cannot truncate inode 0x%lx, attribute "
|
||||
"type 0x%x, because the conversion from "
|
||||
"resident to non-resident attribute failed "
|
||||
"with error code %i.", vi->i_ino,
|
||||
(unsigned)le32_to_cpu(ni->type), err);
|
||||
if (err != -ENOMEM)
|
||||
err = -EIO;
|
||||
goto conv_err_out;
|
||||
}
|
||||
/* TODO: Not implemented from here, abort. */
|
||||
if (err == -ENOSPC)
|
||||
ntfs_error(vol->sb, "Not enough space in the mft record/on "
|
||||
"disk for the non-resident attribute value. "
|
||||
"This case is not implemented yet.");
|
||||
else /* if (err == -EPERM) */
|
||||
ntfs_error(vol->sb, "This attribute type may not be "
|
||||
"non-resident. This case is not implemented "
|
||||
"yet.");
|
||||
err = -EOPNOTSUPP;
|
||||
goto conv_err_out;
|
||||
#if 0
|
||||
// TODO: Attempt to make other attributes non-resident.
|
||||
if (!err)
|
||||
goto do_resident_extend;
|
||||
/*
|
||||
* Both the attribute list attribute and the standard information
|
||||
* attribute must remain in the base inode. Thus, if this is one of
|
||||
* these attributes, we have to try to move other attributes out into
|
||||
* extent mft records instead.
|
||||
*/
|
||||
if (ni->type == AT_ATTRIBUTE_LIST ||
|
||||
ni->type == AT_STANDARD_INFORMATION) {
|
||||
// TODO: Attempt to move other attributes into extent mft
|
||||
// records.
|
||||
err = -EOPNOTSUPP;
|
||||
if (!err)
|
||||
goto do_resident_extend;
|
||||
goto err_out;
|
||||
}
|
||||
a = ctx->attr;
|
||||
/* If the size has not changed there is nothing to do. */
|
||||
if (ntfs_attr_size(a) == i_size_read(vi))
|
||||
goto done;
|
||||
// TODO: Implement the truncate...
|
||||
ntfs_error(vi->i_sb, "Inode size has changed but this is not "
|
||||
"implemented yet. Resetting inode size to old value. "
|
||||
" This is most likely a bug in the ntfs driver!");
|
||||
i_size_write(vi, ntfs_attr_size(a));
|
||||
done:
|
||||
ntfs_attr_put_search_ctx(ctx);
|
||||
unmap_mft_record(ni);
|
||||
NInoClearTruncateFailed(ni);
|
||||
ntfs_debug("Done.");
|
||||
return 0;
|
||||
err_out:
|
||||
if (err != -ENOMEM) {
|
||||
NVolSetErrors(vol);
|
||||
make_bad_inode(vi);
|
||||
// TODO: Attempt to move this attribute to an extent mft record, but
|
||||
// only if it is not already the only attribute in an mft record in
|
||||
// which case there would be nothing to gain.
|
||||
err = -EOPNOTSUPP;
|
||||
if (!err)
|
||||
goto do_resident_extend;
|
||||
/* There is nothing we can do to make enough space. )-: */
|
||||
goto err_out;
|
||||
#endif
|
||||
do_non_resident_truncate:
	BUG_ON(!NInoNonResident(ni));
	if (alloc_change < 0) {
		highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		if (highest_vcn > 0 &&
				old_alloc_size >> vol->cluster_size_bits >
				highest_vcn + 1) {
			/*
			 * This attribute has multiple extents. Not yet
			 * supported.
			 */
			ntfs_error(vol->sb, "Cannot truncate inode 0x%lx, "
					"attribute type 0x%x, because the "
					"attribute is highly fragmented (it "
					"consists of multiple extents) and "
					"this case is not implemented yet.",
					vi->i_ino,
					(unsigned)le32_to_cpu(ni->type));
			err = -EOPNOTSUPP;
			goto bad_out;
		}
	}
	/*
	 * If the size is shrinking, need to reduce the initialized_size and
	 * the data_size before reducing the allocation.
	 */
	if (size_change < 0) {
		/*
		 * Make the valid size smaller (i_size is already up-to-date).
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		if (new_size < ni->initialized_size) {
			ni->initialized_size = new_size;
			a->data.non_resident.initialized_size =
					cpu_to_sle64(new_size);
		}
		a->data.non_resident.data_size = cpu_to_sle64(new_size);
		write_unlock_irqrestore(&ni->size_lock, flags);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		/* If the allocated size is not changing, we are done. */
		if (!alloc_change)
			goto unm_done;
		/*
		 * If the size is shrinking it makes no sense for the
		 * allocation to be growing.
		 */
		BUG_ON(alloc_change > 0);
	} else /* if (size_change >= 0) */ {
		/*
		 * The file size is growing or staying the same but the
		 * allocation can be shrinking, growing or staying the same.
		 */
		if (alloc_change > 0) {
			/*
			 * We need to extend the allocation and possibly update
			 * the data size. If we are updating the data size,
			 * since we are not touching the initialized_size we do
			 * not need to worry about the actual data on disk.
			 * And as far as the page cache is concerned, there
			 * will be no pages beyond the old data size and any
			 * partial region in the last page between the old and
			 * new data size (or the end of the page if the new
			 * data size is outside the page) does not need to be
			 * modified as explained above for the resident
			 * attribute truncate case. To do this, we simply drop
			 * the locks we hold and leave all the work to our
			 * friendly helper ntfs_attr_extend_allocation().
			 */
			ntfs_attr_put_search_ctx(ctx);
			unmap_mft_record(base_ni);
			up_write(&ni->runlist.lock);
			err = ntfs_attr_extend_allocation(ni, new_size,
					size_change > 0 ? new_size : -1, -1);
			/*
			 * ntfs_attr_extend_allocation() will have done error
			 * output already.
			 */
			goto done;
		}
		if (!alloc_change)
			goto alloc_done;
	}
	/* alloc_change < 0 */
	/* Free the clusters. */
	nr_freed = ntfs_cluster_free(ni, new_alloc_size >>
			vol->cluster_size_bits, -1, ctx);
	m = ctx->mrec;
	a = ctx->attr;
	if (unlikely(nr_freed < 0)) {
		ntfs_error(vol->sb, "Failed to release cluster(s) (error code "
				"%lli). Unmount and run chkdsk to recover "
				"the lost cluster(s).", (long long)nr_freed);
		NVolSetErrors(vol);
		nr_freed = 0;
	}
	/* Truncate the runlist. */
	err = ntfs_rl_truncate_nolock(vol, &ni->runlist,
			new_alloc_size >> vol->cluster_size_bits);
	/*
	 * If the runlist truncation failed and/or the search context is no
	 * longer valid, we cannot resize the attribute record or build the
	 * mapping pairs array thus we mark the inode bad so that no access to
	 * the freed clusters can happen.
	 */
	if (unlikely(err || IS_ERR(m))) {
		ntfs_error(vol->sb, "Failed to %s (error code %li).%s",
				IS_ERR(m) ?
				"restore attribute search context" :
				"truncate attribute runlist",
				IS_ERR(m) ? PTR_ERR(m) : err, es);
		err = -EIO;
		goto bad_out;
	}
	/* Get the size for the shrunk mapping pairs array for the runlist. */
	mp_size = ntfs_get_size_for_mapping_pairs(vol, ni->runlist.rl, 0, -1);
	if (unlikely(mp_size <= 0)) {
		ntfs_error(vol->sb, "Cannot shrink allocation of inode 0x%lx, "
				"attribute type 0x%x, because determining the "
				"size for the mapping pairs failed with error "
				"code %i.%s", vi->i_ino,
				(unsigned)le32_to_cpu(ni->type), mp_size, es);
		err = -EIO;
		goto bad_out;
	}
	/*
	 * Shrink the attribute record for the new mapping pairs array. Note,
	 * this cannot fail since we are making the attribute smaller thus by
	 * definition there is enough space to do so.
	 */
	attr_len = le32_to_cpu(a->length);
	err = ntfs_attr_record_resize(m, a, mp_size +
			le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
	BUG_ON(err);
	/*
	 * Generate the mapping pairs array directly into the attribute record.
	 */
	err = ntfs_mapping_pairs_build(vol, (u8*)a +
			le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
			mp_size, ni->runlist.rl, 0, -1, NULL);
	if (unlikely(err)) {
		ntfs_error(vol->sb, "Cannot shrink allocation of inode 0x%lx, "
				"attribute type 0x%x, because building the "
				"mapping pairs failed with error code %i.%s",
				vi->i_ino, (unsigned)le32_to_cpu(ni->type),
				err, es);
		err = -EIO;
		goto bad_out;
	}
	/* Update the allocated/compressed size as well as the highest vcn. */
	a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
			vol->cluster_size_bits) - 1);
	write_lock_irqsave(&ni->size_lock, flags);
	ni->allocated_size = new_alloc_size;
	a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
	if (NInoSparse(ni) || NInoCompressed(ni)) {
		if (nr_freed) {
			ni->itype.compressed.size -= nr_freed <<
					vol->cluster_size_bits;
			BUG_ON(ni->itype.compressed.size < 0);
			a->data.non_resident.compressed_size = cpu_to_sle64(
					ni->itype.compressed.size);
			vi->i_blocks = ni->itype.compressed.size >> 9;
		}
	} else
		vi->i_blocks = new_alloc_size >> 9;
	write_unlock_irqrestore(&ni->size_lock, flags);
	/*
	 * We have shrunk the allocation. If this is a shrinking truncate we
	 * have already dealt with the initialized_size and the data_size above
	 * and we are done. If the truncate is only changing the allocation
	 * and not the data_size, we are also done. If this is an extending
	 * truncate, need to extend the data_size now which is ensured by the
	 * fact that @size_change is positive.
	 */
alloc_done:
	/*
	 * If the size is growing, need to update it now. If it is shrinking,
	 * we have already updated it above (before the allocation change).
	 */
	if (size_change > 0)
		a->data.non_resident.data_size = cpu_to_sle64(new_size);
	/* Ensure the modified mft record is written out. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
unm_done:
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	up_write(&ni->runlist.lock);
done:
	/* Update the mtime and ctime on the base inode. */
	inode_update_time(VFS_I(base_ni), 1);
	if (likely(!err)) {
		NInoClearTruncateFailed(ni);
		ntfs_debug("Done.");
	}
	return err;
old_bad_out:
	old_size = -1;
bad_out:
	if (err != -ENOMEM && err != -EOPNOTSUPP) {
		make_bad_inode(vi);
		make_bad_inode(VFS_I(base_ni));
		NVolSetErrors(vol);
	}
	if (err != -EOPNOTSUPP)
		NInoSetTruncateFailed(ni);
	else if (old_size >= 0)
		i_size_write(vi, old_size);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(ni);
	NInoSetTruncateFailed(ni);
	unmap_mft_record(base_ni);
	up_write(&ni->runlist.lock);
out:
	ntfs_debug("Failed. Returning error code %i.", err);
	return err;
conv_err_out:
	if (err != -ENOMEM && err != -EOPNOTSUPP) {
		make_bad_inode(vi);
		make_bad_inode(VFS_I(base_ni));
		NVolSetErrors(vol);
	}
	if (err != -EOPNOTSUPP)
		NInoSetTruncateFailed(ni);
	else
		i_size_write(vi, old_size);
	goto out;
}

/**
@@ -2420,8 +2845,7 @@ int ntfs_setattr(struct dentry *dentry, struct iattr *attr)

	err = inode_change_ok(vi, attr);
	if (err)
		return err;
		goto out;
	/* We do not support NTFS ACLs yet. */
	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE)) {
		ntfs_warning(vi->i_sb, "Changes in user/group/mode are not "

@@ -2429,14 +2853,22 @@ int ntfs_setattr(struct dentry *dentry, struct iattr *attr)
		err = -EOPNOTSUPP;
		goto out;
	}

	if (ia_valid & ATTR_SIZE) {
		if (attr->ia_size != i_size_read(vi)) {
			ntfs_warning(vi->i_sb, "Changes in inode size are not "
					"supported yet, ignoring.");
			err = -EOPNOTSUPP;
			// TODO: Implement...
			// err = vmtruncate(vi, attr->ia_size);
			ntfs_inode *ni = NTFS_I(vi);
			/*
			 * FIXME: For now we do not support resizing of
			 * compressed or encrypted files yet.
			 */
			if (NInoCompressed(ni) || NInoEncrypted(ni)) {
				ntfs_warning(vi->i_sb, "Changes in inode size "
						"are not supported yet for "
						"%s files, ignoring.",
						NInoCompressed(ni) ?
						"compressed" : "encrypted");
				err = -EOPNOTSUPP;
			} else
				err = vmtruncate(vi, attr->ia_size);
			if (err || ia_valid == ATTR_SIZE)
				goto out;
		} else {

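The hunks above show how a size change arriving through ->setattr is now handed to the real truncate code (via vmtruncate) for regular files instead of being rejected. As context, here is a minimal user-space sketch of the call path that exercises it; the file path is made up, and an NTFS volume mounted read-write with this driver is assumed:

	/* Sketch: shrink then extend a file on a mounted NTFS volume. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/mnt/ntfs/test.dat", O_WRONLY); /* illustrative path */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Shrinking truncate: ends up in ntfs_setattr() with ATTR_SIZE set. */
		if (ftruncate(fd, 4096) < 0)
			perror("ftruncate (shrink)");
		/* Extending truncate: the allocation is grown through the same path. */
		if (ftruncate(fd, 1024 * 1024) < 0)
			perror("ftruncate (extend)");
		close(fd);
		return 0;
	}
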
@@ -1021,10 +1021,17 @@ enum {
	FILE_NAME_POSIX = 0x00,
	/* This is the largest namespace. It is case sensitive and allows all
	   Unicode characters except for: '\0' and '/'. Beware that in
	   WinNT/2k files which eg have the same name except for their case
	   will not be distinguished by the standard utilities and thus a "del
	   filename" will delete both "filename" and "fileName" without
	   warning. */
	   WinNT/2k/2003 by default files which eg have the same name except
	   for their case will not be distinguished by the standard utilities
	   and thus a "del filename" will delete both "filename" and "fileName"
	   without warning. However if for example Services For Unix (SFU) are
	   installed and the case sensitive option was enabled at installation
	   time, then you can create/access/delete such files.
	   Note that even SFU places restrictions on the filenames beyond the
	   '\0' and '/' and in particular the following set of characters is
	   not allowed: '"', '/', '<', '>', '\'. All other characters,
	   including the ones not allowed in WIN32 namespace are allowed.
	   Tested with SFU 3.5 (this is now free) running on Windows XP. */
	FILE_NAME_WIN32 = 0x01,
	/* The standard WinNT/2k NTFS long filenames. Case insensitive. All
	   Unicode chars except: '\0', '"', '*', '/', ':', '<', '>', '?', '\',

@@ -2367,7 +2374,9 @@ typedef struct {
 * Extended attribute flags (8-bit).
 */
enum {
	NEED_EA = 0x80
	NEED_EA = 0x80	/* If set the file to which the EA belongs
			   cannot be interpreted without understanding
			   the associated extended attributes. */
} __attribute__ ((__packed__));

typedef u8 EA_FLAGS;

@@ -2375,20 +2384,20 @@ typedef u8 EA_FLAGS;
/*
 * Attribute: Extended attribute (EA) (0xe0).
 *
 * NOTE: Always non-resident. (Is this true?)
 * NOTE: Can be resident or non-resident.
 *
 * Like the attribute list and the index buffer list, the EA attribute value is
 * a sequence of EA_ATTR variable length records.
 *
 * FIXME: It appears weird that the EA name is not unicode. Is it true?
 */
typedef struct {
	le32 next_entry_offset;	/* Offset to the next EA_ATTR. */
	EA_FLAGS flags;		/* Flags describing the EA. */
	u8 ea_name_length;	/* Length of the name of the EA in bytes. */
	u8 ea_name_length;	/* Length of the name of the EA in bytes
				   excluding the '\0' byte terminator. */
	le16 ea_value_length;	/* Byte size of the EA's value. */
	u8 ea_name[0];		/* Name of the EA. */
	u8 ea_value[0];		/* The value of the EA. Immediately follows
	u8 ea_name[0];		/* Name of the EA. Note this is ASCII, not
				   Unicode and it is zero terminated. */
	u8 ea_value[0];		/* The value of the EA. Immediately follows
				   the name. */
} __attribute__ ((__packed__)) EA_ATTR;

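Since the EA attribute value is, per the comments above, a sequence of variable-length EA_ATTR records chained by next_entry_offset, a walker over a raw little-endian EA buffer could look like the sketch below. This is a user-space illustration rather than driver code: the struct merely mirrors the layout shown above, walk_eas() is an invented name, and the buffer is assumed to have been read out of the EA attribute already.

	/* Sketch: iterate EA_ATTR records in a raw little-endian buffer. */
	#include <endian.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct ea_attr {
		uint32_t next_entry_offset;	/* le32: offset to the next record */
		uint8_t  flags;			/* EA_FLAGS, e.g. NEED_EA (0x80) */
		uint8_t  ea_name_length;	/* name length, excluding '\0' */
		uint16_t ea_value_length;	/* le16: byte size of the value */
		char	 ea_name[];		/* ASCII, '\0' terminated; value follows */
	} __attribute__((__packed__));

	static void walk_eas(const uint8_t *buf, size_t len)
	{
		size_t pos = 0;

		while (pos + sizeof(struct ea_attr) <= len) {
			const struct ea_attr *ea = (const void *)(buf + pos);
			uint32_t next = le32toh(ea->next_entry_offset);

			printf("EA %.*s: %u value bytes, flags 0x%02x\n",
					(int)ea->ea_name_length, ea->ea_name,
					(unsigned)le16toh(ea->ea_value_length),
					(unsigned)ea->flags);
			/* The value starts right after the '\0' terminated name,
			   i.e. at ea->ea_name + ea->ea_name_length + 1. */
			if (!next || pos + next > len)
				break;	/* last record or malformed chain */
			pos += next;
		}
	}
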
@@ -76,6 +76,7 @@ int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
 * @count: number of clusters to allocate
 * @start_lcn: starting lcn at which to allocate the clusters (or -1 if none)
 * @zone: zone from which to allocate the clusters
 * @is_extension: if TRUE, this is an attribute extension
 *
 * Allocate @count clusters preferably starting at cluster @start_lcn or at the
 * current allocator position if @start_lcn is -1, on the mounted ntfs volume

@@ -86,6 +87,13 @@ int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
 * @start_vcn specifies the vcn of the first allocated cluster. This makes
 * merging the resulting runlist with the old runlist easier.
 *
 * If @is_extension is TRUE, the caller is allocating clusters to extend an
 * attribute and if it is FALSE, the caller is allocating clusters to fill a
 * hole in an attribute. Practically the difference is that if @is_extension
 * is TRUE the returned runlist will be terminated with LCN_ENOENT and if
 * @is_extension is FALSE the runlist will be terminated with
 * LCN_RL_NOT_MAPPED.
 *
 * You need to check the return value with IS_ERR(). If this is false, the
 * function was successful and the return value is a runlist describing the
 * allocated cluster(s). If IS_ERR() is true, the function failed and

@@ -137,7 +145,8 @@ int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
 */
runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
		const s64 count, const LCN start_lcn,
		const NTFS_CLUSTER_ALLOCATION_ZONES zone)
		const NTFS_CLUSTER_ALLOCATION_ZONES zone,
		const BOOL is_extension)
{
	LCN zone_start, zone_end, bmp_pos, bmp_initial_pos, last_read_pos, lcn;
	LCN prev_lcn = 0, prev_run_len = 0, mft_zone_size;

@@ -310,7 +319,7 @@ runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
			continue;
		}
		bit = 1 << (lcn & 7);
		ntfs_debug("bit %i.", bit);
		ntfs_debug("bit 0x%x.", bit);
		/* If the bit is already set, go onto the next one. */
		if (*byte & bit) {
			lcn++;

@@ -729,7 +738,7 @@ switch_to_data1_zone: search_zone = 2;
	/* Add runlist terminator element. */
	if (likely(rl)) {
		rl[rlpos].vcn = rl[rlpos - 1].vcn + rl[rlpos - 1].length;
		rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
		rl[rlpos].lcn = is_extension ? LCN_ENOENT : LCN_RL_NOT_MAPPED;
		rl[rlpos].length = 0;
	}
	if (likely(page && !IS_ERR(page))) {

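Given the extended signature and the IS_ERR() convention documented above, an in-driver caller would use the new @is_extension parameter roughly as in the fragment below. This is only a sketch of the calling convention, not complete driver code: the surrounding variables (vol, start_vcn, nr_clusters, err) and the error label are assumed to exist in the calling function, as they do in the real callers shown further down.

	/* Sketch only: extending an attribute, so the returned runlist is
	 * terminated with LCN_ENOENT; pass FALSE instead when filling a hole. */
	runlist_element *rl;

	rl = ntfs_cluster_alloc(vol, start_vcn, nr_clusters, -1 /* no lcn hint */,
			DATA_ZONE, TRUE /* is_extension */);
	if (IS_ERR(rl)) {
		err = PTR_ERR(rl);	/* allocation failed */
		goto err_out;
	}
	/* On success @rl describes the newly allocated cluster run(s). */
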
@@ -782,6 +791,7 @@ switch_to_data1_zone: search_zone = 2;
 * @ni: ntfs inode whose runlist describes the clusters to free
 * @start_vcn: vcn in the runlist of @ni at which to start freeing clusters
 * @count: number of clusters to free or -1 for all clusters
 * @ctx: active attribute search context if present or NULL if not
 * @is_rollback: true if this is a rollback operation
 *
 * Free @count clusters starting at the cluster @start_vcn in the runlist

@@ -791,15 +801,39 @@ switch_to_data1_zone: search_zone = 2;
 * deallocated. Thus, to completely free all clusters in a runlist, use
 * @start_vcn = 0 and @count = -1.
 *
 * If @ctx is specified, it is an active search context of @ni and its base mft
 * record. This is needed when __ntfs_cluster_free() encounters unmapped
 * runlist fragments and allows their mapping. If you do not have the mft
 * record mapped, you can specify @ctx as NULL and __ntfs_cluster_free() will
 * perform the necessary mapping and unmapping.
 *
 * Note, __ntfs_cluster_free() saves the state of @ctx on entry and restores it
 * before returning. Thus, @ctx will be left pointing to the same attribute on
 * return as on entry. However, the actual pointers in @ctx may point to
 * different memory locations on return, so you must remember to reset any
 * cached pointers from the @ctx, i.e. after the call to __ntfs_cluster_free(),
 * you will probably want to do:
 *	m = ctx->mrec;
 *	a = ctx->attr;
 * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
 * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
 *
 * @is_rollback should always be FALSE, it is for internal use to rollback
 * errors. You probably want to use ntfs_cluster_free() instead.
 *
 * Note, ntfs_cluster_free() does not modify the runlist at all, so the caller
 * has to deal with it later.
 * Note, __ntfs_cluster_free() does not modify the runlist, so you have to
 * remove from the runlist or mark sparse the freed runs later.
 *
 * Return the number of deallocated clusters (not counting sparse ones) on
 * success and -errno on error.
 *
 * WARNING: If @ctx is supplied, regardless of whether success or failure is
 *	returned, you need to check IS_ERR(@ctx->mrec) and if TRUE the @ctx
 *	is no longer valid, i.e. you need to either call
 *	ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
 *	In that case PTR_ERR(@ctx->mrec) will give you the error code for
 *	why the mapping of the old inode failed.
 *
 * Locking: - The runlist described by @ni must be locked for writing on entry
 *	      and is locked on return. Note the runlist may be modified when
 *	      needed runlist fragments need to be mapped.

@@ -807,9 +841,13 @@ switch_to_data1_zone: search_zone = 2;
 *	      on return.
 *	    - This function takes the volume lcn bitmap lock for writing and
 *	      modifies the bitmap contents.
 *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
 *	      entry and it will be left unmapped on return.
 *	    - If @ctx is not NULL, the base mft record must be mapped on entry
 *	      and it will be left mapped on return.
 */
s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count,
		const BOOL is_rollback)
		ntfs_attr_search_ctx *ctx, const BOOL is_rollback)
{
	s64 delta, to_free, total_freed, real_freed;
	ntfs_volume *vol;

@@ -839,7 +877,7 @@ s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count,

	total_freed = real_freed = 0;

	rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, TRUE);
	rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, ctx);
	if (IS_ERR(rl)) {
		if (!is_rollback)
			ntfs_error(vol->sb, "Failed to find first runlist "

@@ -893,7 +931,7 @@ s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count,

		/* Attempt to map runlist. */
		vcn = rl->vcn;
		rl = ntfs_attr_find_vcn_nolock(ni, vcn, TRUE);
		rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);
		if (IS_ERR(rl)) {
			err = PTR_ERR(rl);
			if (!is_rollback)

@@ -961,7 +999,7 @@ s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count,
	 * If rollback fails, set the volume errors flag, emit an error
	 * message, and return the error code.
	 */
	delta = __ntfs_cluster_free(ni, start_vcn, total_freed, TRUE);
	delta = __ntfs_cluster_free(ni, start_vcn, total_freed, ctx, TRUE);
	if (delta < 0) {
		ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving "
				"inconsistent metadata! Unmount and run "

@@ -27,6 +27,7 @@

#include <linux/fs.h>

#include "attrib.h"
#include "types.h"
#include "inode.h"
#include "runlist.h"

@@ -41,16 +42,18 @@ typedef enum {

extern runlist_element *ntfs_cluster_alloc(ntfs_volume *vol,
		const VCN start_vcn, const s64 count, const LCN start_lcn,
		const NTFS_CLUSTER_ALLOCATION_ZONES zone);
		const NTFS_CLUSTER_ALLOCATION_ZONES zone,
		const BOOL is_extension);

extern s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn,
		s64 count, const BOOL is_rollback);
		s64 count, ntfs_attr_search_ctx *ctx, const BOOL is_rollback);

/**
 * ntfs_cluster_free - free clusters on an ntfs volume
 * @ni: ntfs inode whose runlist describes the clusters to free
 * @start_vcn: vcn in the runlist of @ni at which to start freeing clusters
 * @count: number of clusters to free or -1 for all clusters
 * @ctx: active attribute search context if present or NULL if not
 *
 * Free @count clusters starting at the cluster @start_vcn in the runlist
 * described by the ntfs inode @ni.

@@ -59,12 +62,36 @@ extern s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn,
 * deallocated. Thus, to completely free all clusters in a runlist, use
 * @start_vcn = 0 and @count = -1.
 *
 * Note, ntfs_cluster_free() does not modify the runlist at all, so the caller
 * has to deal with it later.
 * If @ctx is specified, it is an active search context of @ni and its base mft
 * record. This is needed when ntfs_cluster_free() encounters unmapped runlist
 * fragments and allows their mapping. If you do not have the mft record
 * mapped, you can specify @ctx as NULL and ntfs_cluster_free() will perform
 * the necessary mapping and unmapping.
 *
 * Note, ntfs_cluster_free() saves the state of @ctx on entry and restores it
 * before returning. Thus, @ctx will be left pointing to the same attribute on
 * return as on entry. However, the actual pointers in @ctx may point to
 * different memory locations on return, so you must remember to reset any
 * cached pointers from the @ctx, i.e. after the call to ntfs_cluster_free(),
 * you will probably want to do:
 *	m = ctx->mrec;
 *	a = ctx->attr;
 * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
 * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
 *
 * Note, ntfs_cluster_free() does not modify the runlist, so you have to remove
 * from the runlist or mark sparse the freed runs later.
 *
 * Return the number of deallocated clusters (not counting sparse ones) on
 * success and -errno on error.
 *
 * WARNING: If @ctx is supplied, regardless of whether success or failure is
 *	returned, you need to check IS_ERR(@ctx->mrec) and if TRUE the @ctx
 *	is no longer valid, i.e. you need to either call
 *	ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
 *	In that case PTR_ERR(@ctx->mrec) will give you the error code for
 *	why the mapping of the old inode failed.
 *
 * Locking: - The runlist described by @ni must be locked for writing on entry
 *	      and is locked on return. Note the runlist may be modified when
 *	      needed runlist fragments need to be mapped.

@@ -72,11 +99,15 @@ extern s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn,
 *	      on return.
 *	    - This function takes the volume lcn bitmap lock for writing and
 *	      modifies the bitmap contents.
 *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
 *	      entry and it will be left unmapped on return.
 *	    - If @ctx is not NULL, the base mft record must be mapped on entry
 *	      and it will be left mapped on return.
 */
static inline s64 ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn,
		s64 count)
		s64 count, ntfs_attr_search_ctx *ctx)
{
	return __ntfs_cluster_free(ni, start_vcn, count, FALSE);
	return __ntfs_cluster_free(ni, start_vcn, count, ctx, FALSE);
}

extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,

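The comment block above spells out the pattern a caller must follow when it passes its own search context: re-fetch the cached mft record and attribute pointers afterwards and check whether the context survived. A condensed in-driver sketch of that pattern, with the surrounding variables (ni, vol, ctx, m, a, err, the bad_out label) assumed to exist as they do in ntfs_truncate() earlier in this diff:

	/* Sketch of the documented calling pattern; not a complete function. */
	s64 nr_freed;

	nr_freed = ntfs_cluster_free(ni, new_alloc_size >>
			vol->cluster_size_bits, -1, ctx);
	/* The context may have been remapped: refresh any cached pointers. */
	m = ctx->mrec;
	a = ctx->attr;
	if (IS_ERR(m)) {
		/* Context is no longer valid; PTR_ERR(m) gives the reason. */
		err = PTR_ERR(m);
		goto bad_out;
	}
	if (nr_freed < 0)
		err = nr_freed;	/* freeing failed, runlist left unmodified */
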
@@ -39,8 +39,7 @@
 * If there was insufficient memory to complete the request, return NULL.
 * Depending on @gfp_mask the allocation may be guaranteed to succeed.
 */
static inline void *__ntfs_malloc(unsigned long size,
		gfp_t gfp_mask)
static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask)
{
	if (likely(size <= PAGE_SIZE)) {
		BUG_ON(!size);

@@ -49,7 +49,8 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
	ntfs_volume *vol = ni->vol;
	struct inode *mft_vi = vol->mft_ino;
	struct page *page;
	unsigned long index, ofs, end_index;
	unsigned long index, end_index;
	unsigned ofs;

	BUG_ON(ni->page);
	/*

@@ -1308,7 +1309,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
	ll = mftbmp_ni->allocated_size;
	read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
	rl = ntfs_attr_find_vcn_nolock(mftbmp_ni,
			(ll - 1) >> vol->cluster_size_bits, TRUE);
			(ll - 1) >> vol->cluster_size_bits, NULL);
	if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
		up_write(&mftbmp_ni->runlist.lock);
		ntfs_error(vol->sb, "Failed to determine last allocated "

@@ -1354,7 +1355,8 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
	up_write(&vol->lcnbmp_lock);
	ntfs_unmap_page(page);
	/* Allocate a cluster from the DATA_ZONE. */
	rl2 = ntfs_cluster_alloc(vol, rl[1].vcn, 1, lcn, DATA_ZONE);
	rl2 = ntfs_cluster_alloc(vol, rl[1].vcn, 1, lcn, DATA_ZONE,
			TRUE);
	if (IS_ERR(rl2)) {
		up_write(&mftbmp_ni->runlist.lock);
		ntfs_error(vol->sb, "Failed to allocate a cluster for "

@@ -1738,7 +1740,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
	ll = mft_ni->allocated_size;
	read_unlock_irqrestore(&mft_ni->size_lock, flags);
	rl = ntfs_attr_find_vcn_nolock(mft_ni,
			(ll - 1) >> vol->cluster_size_bits, TRUE);
			(ll - 1) >> vol->cluster_size_bits, NULL);
	if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
		up_write(&mft_ni->runlist.lock);
		ntfs_error(vol->sb, "Failed to determine last allocated "

@@ -1779,7 +1781,8 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
			nr > min_nr ? "default" : "minimal", (long long)nr);
	old_last_vcn = rl[1].vcn;
	do {
		rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE);
		rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE,
				TRUE);
		if (likely(!IS_ERR(rl2)))
			break;
		if (PTR_ERR(rl2) != -ENOSPC || nr == min_nr) {

@@ -1951,20 +1954,21 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
		NVolSetErrors(vol);
		return ret;
	}
	a = ctx->attr;
	a->data.non_resident.highest_vcn = cpu_to_sle64(old_last_vcn - 1);
	ctx->attr->data.non_resident.highest_vcn =
			cpu_to_sle64(old_last_vcn - 1);
undo_alloc:
	if (ntfs_cluster_free(mft_ni, old_last_vcn, -1) < 0) {
	if (ntfs_cluster_free(mft_ni, old_last_vcn, -1, ctx) < 0) {
		ntfs_error(vol->sb, "Failed to free clusters from mft data "
				"attribute.%s", es);
		NVolSetErrors(vol);
	}
	a = ctx->attr;
	if (ntfs_rl_truncate_nolock(vol, &mft_ni->runlist, old_last_vcn)) {
		ntfs_error(vol->sb, "Failed to truncate mft data attribute "
				"runlist.%s", es);
		NVolSetErrors(vol);
	}
	if (mp_rebuilt) {
	if (mp_rebuilt && !IS_ERR(ctx->mrec)) {
		if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset),
				old_alen - le16_to_cpu(

@@ -1981,6 +1985,10 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
		}
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
	} else if (IS_ERR(ctx->mrec)) {
		ntfs_error(vol->sb, "Failed to restore attribute search "
				"context.%s", es);
		NVolSetErrors(vol);
	}
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);

@@ -1447,7 +1447,7 @@ static BOOL load_and_init_usnjrnl(ntfs_volume *vol)
	if (unlikely(i_size_read(tmp_ino) < sizeof(USN_HEADER))) {
		ntfs_error(vol->sb, "Found corrupt $UsnJrnl/$DATA/$Max "
				"attribute (size is 0x%llx but should be at "
				"least 0x%x bytes).", i_size_read(tmp_ino),
				"least 0x%zx bytes).", i_size_read(tmp_ino),
				sizeof(USN_HEADER));
		return FALSE;
	}

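The last hunk swaps %x for %zx because sizeof() yields a size_t, whose width differs between 32-bit and 64-bit builds, and %zx is the length modifier that matches it. A tiny stand-alone illustration (the struct is a made-up stand-in, not the real USN_HEADER):

	#include <stdio.h>

	struct usn_header_example {	/* stand-in struct for illustration only */
		unsigned long long allocated_size;
		unsigned long long maximum_size;
	};

	int main(void)
	{
		/* %zx matches size_t on both 32-bit and 64-bit platforms. */
		printf("header is 0x%zx bytes\n", sizeof(struct usn_header_example));
		return 0;
	}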