proportions: add @gfp to init functions
Percpu allocator now supports allocation mask.  Add @gfp to
[flex_]proportions init functions so that !GFP_KERNEL allocation masks
can be used with them too.

This patch doesn't make any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Peter Zijlstra <peterz@infradead.org>
commit 20ae00792c
parent 908c7f1949

6 files changed, 17 insertions(+), 15 deletions(-)
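With the extra @gfp argument, code that must avoid GFP_KERNEL (for example,
initialization under locks or on the I/O path) can now set up these
structures with a restricted mask. Below is a minimal sketch of such a
caller, assuming the post-patch signatures shown in the hunks that follow;
the demo_* names and the GFP_NOIO choice are illustrative assumptions, not
part of this patch:

#include <linux/flex_proportions.h>
#include <linux/gfp.h>

static struct fprop_global demo_completions;		/* hypothetical */
static struct fprop_local_percpu demo_local;		/* hypothetical */

static int demo_proportions_setup(void)
{
	int err;

	/*
	 * GFP_NOIO: a !GFP_KERNEL mask for contexts that must not recurse
	 * into the I/O path; any valid gfp mask could be passed instead.
	 */
	err = fprop_global_init(&demo_completions, GFP_NOIO);
	if (err)
		return err;

	err = fprop_local_init_percpu(&demo_local, GFP_NOIO);
	if (err) {
		fprop_global_destroy(&demo_completions);
		return err;
	}
	return 0;
}

As the lib/ hunks below show, both initializers simply forward the mask to
percpu_counter_init(), so a failure means the percpu allocation could not
be satisfied under that mask and must be handled by the caller.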
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -10,6 +10,7 @@
 #include <linux/percpu_counter.h>
 #include <linux/spinlock.h>
 #include <linux/seqlock.h>
+#include <linux/gfp.h>
 
 /*
  * When maximum proportion of some event type is specified, this is the
@@ -32,7 +33,7 @@ struct fprop_global {
 	seqcount_t sequence;
 };
 
-int fprop_global_init(struct fprop_global *p);
+int fprop_global_init(struct fprop_global *p, gfp_t gfp);
 void fprop_global_destroy(struct fprop_global *p);
 bool fprop_new_period(struct fprop_global *p, int periods);
 
@@ -79,7 +80,7 @@ struct fprop_local_percpu {
 	raw_spinlock_t lock;	/* Protect period and numerator */
 };
 
-int fprop_local_init_percpu(struct fprop_local_percpu *pl);
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
 void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
 void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
 void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -12,6 +12,7 @@
 #include <linux/percpu_counter.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/gfp.h>
 
 struct prop_global {
 	/*
@@ -40,7 +41,7 @@ struct prop_descriptor {
 	struct mutex mutex;		/* serialize the prop_global switch */
 };
 
-int prop_descriptor_init(struct prop_descriptor *pd, int shift);
+int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
 void prop_change_shift(struct prop_descriptor *pd, int new_shift);
 
 /*
@@ -61,7 +62,7 @@ struct prop_local_percpu {
 	raw_spinlock_t lock;		/* protect the snapshot state */
 };
 
-int prop_local_init_percpu(struct prop_local_percpu *pl);
+int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
 void prop_local_destroy_percpu(struct prop_local_percpu *pl);
 void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
 void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -34,13 +34,13 @@
  */
 #include <linux/flex_proportions.h>
 
-int fprop_global_init(struct fprop_global *p)
+int fprop_global_init(struct fprop_global *p, gfp_t gfp)
 {
 	int err;
 
 	p->period = 0;
 	/* Use 1 to avoid dealing with periods with 0 events... */
-	err = percpu_counter_init(&p->events, 1, GFP_KERNEL);
+	err = percpu_counter_init(&p->events, 1, gfp);
 	if (err)
 		return err;
 	seqcount_init(&p->sequence);
@@ -168,11 +168,11 @@ void fprop_fraction_single(struct fprop_global *p,
  */
 #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
 
-int fprop_local_init_percpu(struct fprop_local_percpu *pl)
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
 {
 	int err;
 
-	err = percpu_counter_init(&pl->events, 0, GFP_KERNEL);
+	err = percpu_counter_init(&pl->events, 0, gfp);
 	if (err)
 		return err;
 	pl->period = 0;
diff --git a/lib/proportions.c b/lib/proportions.c
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -73,7 +73,7 @@
 #include <linux/proportions.h>
 #include <linux/rcupdate.h>
 
-int prop_descriptor_init(struct prop_descriptor *pd, int shift)
+int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp)
 {
 	int err;
 
@@ -83,11 +83,11 @@ int prop_descriptor_init(struct prop_descriptor *pd, int shift)
 	pd->index = 0;
 	pd->pg[0].shift = shift;
 	mutex_init(&pd->mutex);
-	err = percpu_counter_init(&pd->pg[0].events, 0, GFP_KERNEL);
+	err = percpu_counter_init(&pd->pg[0].events, 0, gfp);
 	if (err)
 		goto out;
 
-	err = percpu_counter_init(&pd->pg[1].events, 0, GFP_KERNEL);
+	err = percpu_counter_init(&pd->pg[1].events, 0, gfp);
 	if (err)
 		percpu_counter_destroy(&pd->pg[0].events);
 
@@ -188,12 +188,12 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
 
 #define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
 
-int prop_local_init_percpu(struct prop_local_percpu *pl)
+int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp)
 {
 	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
-	return percpu_counter_init(&pl->events, 0, GFP_KERNEL);
+	return percpu_counter_init(&pl->events, 0, gfp);
 }
 
 void prop_local_destroy_percpu(struct prop_local_percpu *pl)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -470,7 +470,7 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->write_bandwidth = INIT_BW;
 	bdi->avg_write_bandwidth = INIT_BW;
 
-	err = fprop_local_init_percpu(&bdi->completions);
+	err = fprop_local_init_percpu(&bdi->completions, GFP_KERNEL);
 
 	if (err) {
 err:
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1777,7 +1777,7 @@ void __init page_writeback_init(void)
 	writeback_set_ratelimit();
 	register_cpu_notifier(&ratelimit_nb);
 
-	fprop_global_init(&writeout_completions);
+	fprop_global_init(&writeout_completions, GFP_KERNEL);
 }
 
 /**
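The older proportions API gains the same parameter. A sketch of a caller
passing a non-default mask through prop_descriptor_init() and
prop_local_init_percpu(); the demo_* names, the shift value, and the
GFP_NOWAIT choice are assumptions for illustration, not from this patch:

#include <linux/proportions.h>
#include <linux/gfp.h>

static struct prop_descriptor demo_vm_completions;	/* hypothetical */

static int demo_prop_setup(struct prop_local_percpu *pl)
{
	int err;

	/* The mask covers both percpu counters inside the descriptor. */
	err = prop_descriptor_init(&demo_vm_completions, 10, GFP_NOWAIT);
	if (err)
		return err;

	/* Teardown of the descriptor on this error path is omitted in
	 * this sketch. */
	return prop_local_init_percpu(pl, GFP_NOWAIT);
}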