Merge branch 'writeback-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/linux

* 'writeback-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/linux:
  writeback: remove vm_dirties and task->dirties
  writeback: hard throttle 1000+ dd on a slow USB stick
  mm: Make task in balance_dirty_pages() killable
Linus Torvalds 2011-11-22 08:22:48 -08:00
commit 8ba8ed54de
4 changed files with 7 additions and 23 deletions
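The TASK_KILLABLE change merged here follows the usual kernel pattern of sleeping killably and bailing out when a fatal signal is pending, so a task hit by SIGKILL (for example from the OOM killer) is not left stuck waiting for writeback to catch up. A minimal sketch of that pattern, assuming a hypothetical dirtying_allowed_again() predicate in place of the real ratelimit check; this is not the actual balance_dirty_pages() code:

#include <linux/sched.h>

/* Hypothetical stand-in for the real "may this task dirty again?" check. */
static bool dirtying_allowed_again(void)
{
	return true;
}

static void throttle_one_dirtier(long pause)
{
	for (;;) {
		/* Sleep killably rather than uninterruptibly. */
		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause);

		/* Stop throttling once dirtying is allowed again... */
		if (dirtying_allowed_again())
			break;

		/* ...or as soon as the task has a fatal signal pending. */
		if (fatal_signal_pending(current))
			break;
	}
}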

include/linux/init_task.h

@@ -184,7 +184,6 @@ extern struct cred init_cred;
 		[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
 	}, \
 	.thread_group = LIST_HEAD_INIT(tsk.thread_group), \
-	.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
 	INIT_IDS \
 	INIT_PERF_EVENTS(tsk) \
 	INIT_TRACE_IRQFLAGS \

include/linux/sched.h

@@ -1521,7 +1521,6 @@ struct task_struct {
 #ifdef CONFIG_FAULT_INJECTION
 	int make_it_fail;
 #endif
-	struct prop_local_single dirties;
 	/*
 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
 	 * balance_dirty_pages() for some dirty throttling pause
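For context on the comment above: a task reaches balance_dirty_pages() through the balance_dirty_pages_ratelimited() entry point once it has dirtied nr_dirtied_pause pages since its last pause. A minimal caller-side sketch, assuming a filesystem write path that has just dirtied one page of "mapping"; the helper name is hypothetical and not part of this patch:

#include <linux/fs.h>
#include <linux/writeback.h>

/* Hypothetical wrapper illustrating the throttling entry point. */
static void after_dirtying_one_page(struct address_space *mapping)
{
	/*
	 * Bumps current->nr_dirtied and, once it reaches
	 * current->nr_dirtied_pause, calls balance_dirty_pages()
	 * to pause this task (the loop patched in mm/page-writeback.c).
	 */
	balance_dirty_pages_ratelimited(mapping);
}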

kernel/fork.c

@@ -162,7 +162,6 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 void free_task(struct task_struct *tsk)
 {
-	prop_local_destroy_single(&tsk->dirties);
 	account_kernel_stack(tsk->stack, -1);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
@@ -274,10 +273,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	tsk->stack = ti;
-	err = prop_local_init_single(&tsk->dirties);
-	if (err)
-		goto out;
 	setup_thread_stack(tsk, orig);
 	clear_user_return_notifier(tsk);
 	clear_tsk_need_resched(tsk);

mm/page-writeback.c

@@ -128,7 +128,6 @@ unsigned long global_dirty_limit;
  *
  */
 static struct prop_descriptor vm_completions;
-static struct prop_descriptor vm_dirties;
/*
* couple the period to the dirty_ratio:
@@ -154,7 +153,6 @@ static void update_completion_period(void)
 {
 	int shift = calc_period_shift();
 	prop_change_shift(&vm_completions, shift);
-	prop_change_shift(&vm_dirties, shift);
 	writeback_set_ratelimit();
 }
@@ -235,11 +233,6 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL_GPL(bdi_writeout_inc);
-void task_dirty_inc(struct task_struct *tsk)
-{
-	prop_inc_single(&vm_dirties, &tsk->dirties);
-}
 /*
  * Obtain an accurate fraction of the BDI's portion.
  */
@@ -1133,17 +1126,17 @@ static void balance_dirty_pages(struct address_space *mapping,
 					  pages_dirtied,
 					  pause,
 					  start_time);
-		__set_current_state(TASK_UNINTERRUPTIBLE);
+		__set_current_state(TASK_KILLABLE);
 		io_schedule_timeout(pause);
-		dirty_thresh = hard_dirty_limit(dirty_thresh);
 		/*
-		 * max-pause area. If dirty exceeded but still within this
-		 * area, no need to sleep for more than 200ms: (a) 8 pages per
-		 * 200ms is typically more than enough to curb heavy dirtiers;
-		 * (b) the pause time limit makes the dirtiers more responsive.
+		 * This is typically equal to (nr_dirty < dirty_thresh) and can
+		 * also keep "1000+ dd on a slow USB stick" under control.
 		 */
-		if (nr_dirty < dirty_thresh)
+		if (task_ratelimit)
 			break;
+		if (fatal_signal_pending(current))
+			break;
 	}
@@ -1395,7 +1388,6 @@ void __init page_writeback_init(void)
 	shift = calc_period_shift();
 	prop_descriptor_init(&vm_completions, shift);
-	prop_descriptor_init(&vm_dirties, shift);
 }
/**
@@ -1724,7 +1716,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		__inc_zone_page_state(page, NR_DIRTIED);
 		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
-		task_dirty_inc(current);
 		task_io_account_write(PAGE_CACHE_SIZE);
 	}
 }
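For reference, the vm_dirties/tsk->dirties pair removed above was per-task floating-proportion bookkeeping built on the lib/proportions primitives visible in the hunks: a descriptor initialised in page_writeback_init(), a per-task counter set up in dup_task_struct() and torn down in free_task(), and task_dirty_inc() charging every newly dirtied page. A rough sketch of how those calls fit together, using renamed stand-in variables rather than the pre-merge code verbatim:

#include <linux/proportions.h>

static struct prop_descriptor demo_dirties;		/* stand-in for vm_dirties */
static struct prop_local_single demo_task_dirties;	/* stand-in for tsk->dirties */

static int demo_dirties_setup(int shift)
{
	int err;

	/* Global descriptor holding the aging period (as in page_writeback_init()). */
	err = prop_descriptor_init(&demo_dirties, shift);
	if (err)
		return err;

	/* Per-task local counter (as in dup_task_struct()). */
	return prop_local_init_single(&demo_task_dirties);
}

static void demo_account_dirtied_page(void)
{
	/* What task_dirty_inc() did for each newly dirtied page. */
	prop_inc_single(&demo_dirties, &demo_task_dirties);
}

static void demo_dirties_teardown(void)
{
	/* Matches the prop_local_destroy_single() call removed from free_task(). */
	prop_local_destroy_single(&demo_task_dirties);
}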