xfs: unmount does not wait for shutdown during unmount

An interesting situation can occur if a log IO error occurs during
the unmount of a filesystem. The cases reported have the same
signature - the update of the superblock counters fails due to a log
write IO error:

XFS (dm-16): xfs_do_force_shutdown(0x2) called from line 1170 of file fs/xfs/xfs_log.c.  Return address = 0xffffffffa08a44a1
XFS (dm-16): Log I/O Error Detected.  Shutting down filesystem
XFS (dm-16): Unable to update superblock counters. Freespace may not be correct on next mount.
XFS (dm-16): xfs_log_force: error 5 returned.
XFS (¿-¿¿¿): Please umount the filesystem and rectify the problem(s)

It can be seen that the last line of output contains a corrupt
device name - this is because the log and xfs_mount structures have
already been freed by the time this message is printed. A kernel
oops closely follows.

The issue is that the shutdown is occurring in a separate IO
completion thread to the unmount. Once the shutdown processing has
started and all the iclogs are marked with XLOG_STATE_IOERROR, the
log shutdown code wakes anyone waiting on a log force so they can
process the shutdown error. This wakes up the unmount code that
is doing a synchronous transaction to update the superblock
counters.
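
As a userspace analogy (not XFS code), that wakeup behaves like a
condition variable broadcast: the shutdown side flips the error state
and then wakes every waiter so it can observe that state. In the
minimal pthread sketch below, the force_waiter(), shutdown_side() and
ioerror names are invented purely for illustration (build with
cc -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t force_wait = PTHREAD_COND_INITIALIZER;
static bool ioerror;                    /* stands in for XLOG_STATE_IOERROR */

/* The log-force waiter - the unmount's synchronous transaction ends up here. */
static void *force_waiter(void *arg)
{
        pthread_mutex_lock(&lock);
        while (!ioerror)
                pthread_cond_wait(&force_wait, &lock);
        pthread_mutex_unlock(&lock);
        printf("waiter: woken, sees error state, unmount proceeds\n");
        return NULL;
}

/* The shutdown side - flag the error, then wake anyone waiting on the force. */
static void *shutdown_side(void *arg)
{
        pthread_mutex_lock(&lock);
        ioerror = true;
        pthread_cond_broadcast(&force_wait);
        pthread_mutex_unlock(&lock);
        printf("shutdown: error flagged, waiters woken, completion continues\n");
        return NULL;
}

int main(void)
{
        pthread_t w, s;

        pthread_create(&w, NULL, force_waiter, NULL);
        pthread_create(&s, NULL, shutdown_side, NULL);
        pthread_join(w, NULL);
        pthread_join(s, NULL);
        return 0;
}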

The unmount path now sees all the iclogs are marked with
XLOG_STATE_IOERROR and so never waits on them again, knowing that if
it did, there would be no wakeup trigger and the unmount would hang.
Hence the unmount runs through all the remaining code and frees all
the filesystem structures while
xlog_iodone() is still processing the shutdown. When the log
shutdown processing completes, xfs_do_force_shutdown() emits the
"Please umount the filesystem and rectify the problem(s)" message,
and xlog_iodone() then aborts all the objects attached to the iclog.
An iclog that has already been freed....

The real issue here is that there is no serialisation point between
the log IO and the unmount. We have serialisation points for log
writes, log forces, reservations, etc, but we don't actually have
any code that waits for log IO to fully complete. We do that for all
other types of object, so why not iclogbufs?

Well, it turns out that we can easily do this. We've got xfs_buf
handles, and that's what everyone else uses for IO serialisation.
i.e. bp->b_sema. So, let's hold the iclogbufs locked over IO, and only
release the lock in xlog_iodone() when we are finished with the
buffer. That way, before we tear down an iclog, we can lock and
unlock its buffer to ensure that IO completion has fully finished.
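
As a rough userspace sketch of that lock cycle (again not the actual
XFS code - the struct buffer, submit_io() and io_completion() names
are invented for illustration), the submitter takes a per-buffer
semaphore before kicking off the asynchronous IO, the completion side
posts it only once it is entirely finished with the buffer, and
teardown waits on and re-posts the semaphore before freeing, which
cannot complete until IO completion has finished (build with
cc -pthread):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct buffer {
        sem_t   b_sema;                 /* stands in for bp->b_sema */
        char    data[64];
};

/* "Submit IO": take the semaphore before handing the buffer to completion. */
static void submit_io(struct buffer *bp)
{
        sem_wait(&bp->b_sema);
}

/* IO completion: finish all processing, then release as the very last step. */
static void *io_completion(void *arg)
{
        struct buffer *bp = arg;

        usleep(10000);                  /* simulated completion work */
        printf("completion done with %s\n", bp->data);
        sem_post(&bp->b_sema);          /* nothing references bp after this */
        return NULL;
}

int main(void)
{
        struct buffer *bp = calloc(1, sizeof(*bp));
        pthread_t t;

        sem_init(&bp->b_sema, 0, 1);
        snprintf(bp->data, sizeof(bp->data), "iclogbuf");

        submit_io(bp);
        pthread_create(&t, NULL, io_completion, bp);

        /* Teardown: cycling the semaphore blocks until completion has posted. */
        sem_wait(&bp->b_sema);
        sem_post(&bp->b_sema);

        pthread_join(t, NULL);
        sem_destroy(&bp->b_sema);
        free(bp);
        return 0;
}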

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Tested-by: Mike Snitzer <snitzer@redhat.com>
Tested-by: Bob Mastors <bob.mastors@solidfire.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
Dave Chinner 2014-04-17 08:15:26 +10:00 committed by Dave Chinner
parent d39a2ced0f
commit 9c23eccc1e

fs/xfs/xfs_log.c

@@ -1181,11 +1181,14 @@ xlog_iodone(xfs_buf_t *bp)
         /* log I/O is always issued ASYNC */
         ASSERT(XFS_BUF_ISASYNC(bp));
         xlog_state_done_syncing(iclog, aborted);
+
         /*
-         * do not reference the buffer (bp) here as we could race
-         * with it being freed after writing the unmount record to the
-         * log.
+         * drop the buffer lock now that we are done. Nothing references
+         * the buffer after this, so an unmount waiting on this lock can now
+         * tear it down safely. As such, it is unsafe to reference the buffer
+         * (bp) after the unlock as we could race with it being freed.
          */
+        xfs_buf_unlock(bp);
 }
 
 /*
@@ -1368,8 +1371,16 @@ xlog_alloc_log(
         bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0);
         if (!bp)
                 goto out_free_log;
-        bp->b_iodone = xlog_iodone;
+
+        /*
+         * The iclogbuf buffer locks are held over IO but we are not going to do
+         * IO yet. Hence unlock the buffer so that the log IO path can grab it
+         * when appropriately.
+         */
         ASSERT(xfs_buf_islocked(bp));
+        xfs_buf_unlock(bp);
+
+        bp->b_iodone = xlog_iodone;
         log->l_xbuf = bp;
 
         spin_lock_init(&log->l_icloglock);
@@ -1398,6 +1409,9 @@ xlog_alloc_log(
                 if (!bp)
                         goto out_free_iclog;
 
+                ASSERT(xfs_buf_islocked(bp));
+                xfs_buf_unlock(bp);
+
                 bp->b_iodone = xlog_iodone;
                 iclog->ic_bp = bp;
                 iclog->ic_data = bp->b_addr;
@@ -1422,7 +1436,6 @@ xlog_alloc_log(
                 iclog->ic_callback_tail = &(iclog->ic_callback);
                 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
 
-                ASSERT(xfs_buf_islocked(iclog->ic_bp));
                 init_waitqueue_head(&iclog->ic_force_wait);
                 init_waitqueue_head(&iclog->ic_write_wait);
 
@@ -1631,6 +1644,12 @@ xlog_cksum(
  * we transition the iclogs to IOERROR state *after* flushing all existing
  * iclogs to disk. This is because we don't want anymore new transactions to be
  * started or completed afterwards.
+ *
+ * We lock the iclogbufs here so that we can serialise against IO completion
+ * during unmount. We might be processing a shutdown triggered during unmount,
+ * and that can occur asynchronously to the unmount thread, and hence we need to
+ * ensure that completes before tearing down the iclogbufs. Hence we need to
+ * hold the buffer lock across the log IO to acheive that.
  */
 STATIC int
 xlog_bdstrat(
@@ -1638,6 +1657,7 @@ xlog_bdstrat(
 {
         struct xlog_in_core     *iclog = bp->b_fspriv;
 
+        xfs_buf_lock(bp);
         if (iclog->ic_state & XLOG_STATE_IOERROR) {
                 xfs_buf_ioerror(bp, EIO);
                 xfs_buf_stale(bp);
@@ -1645,7 +1665,8 @@ xlog_bdstrat(
                 /*
                  * It would seem logical to return EIO here, but we rely on
                  * the log state machine to propagate I/O errors instead of
-                 * doing it here.
+                 * doing it here. Similarly, IO completion will unlock the
+                 * buffer, so we don't do it here.
                  */
                 return 0;
         }
@@ -1847,14 +1868,28 @@ xlog_dealloc_log(
         xlog_cil_destroy(log);
 
         /*
-         * always need to ensure that the extra buffer does not point to memory
-         * owned by another log buffer before we free it.
+         * Cycle all the iclogbuf locks to make sure all log IO completion
+         * is done before we tear down these buffers.
          */
+        iclog = log->l_iclog;
+        for (i = 0; i < log->l_iclog_bufs; i++) {
+                xfs_buf_lock(iclog->ic_bp);
+                xfs_buf_unlock(iclog->ic_bp);
+                iclog = iclog->ic_next;
+        }
+
+        /*
+         * Always need to ensure that the extra buffer does not point to memory
+         * owned by another log buffer before we free it. Also, cycle the lock
+         * first to ensure we've completed IO on it.
+         */
+        xfs_buf_lock(log->l_xbuf);
+        xfs_buf_unlock(log->l_xbuf);
         xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size));
         xfs_buf_free(log->l_xbuf);
 
         iclog = log->l_iclog;
-        for (i=0; i<log->l_iclog_bufs; i++) {
+        for (i = 0; i < log->l_iclog_bufs; i++) {
                 xfs_buf_free(iclog->ic_bp);
                 next_iclog = iclog->ic_next;
                 kmem_free(iclog);