x86: break up mtrr_cleanup() into several small functions.

Ingo said mtrr_cleanup() is big and ugly.

So break it up into smaller functions and make it more readable.
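
The split introduces five helpers: print_out_mtrr_range_state(), mtrr_need_cleanup(), mtrr_calc_range_state(), mtrr_print_out_one_result() and mtrr_search_optimal_index(), and it promotes nr_range and range_sums to file scope (joining the existing range[] array) so the helpers can share them. As a rough sketch of the resulting control flow (helper bodies elided, see the diff below):

    static int __init mtrr_cleanup(unsigned address_bits)
    {
            if (!mtrr_need_cleanup())               /* nothing worth doing? */
                    return 0;

            print_out_mtrr_range_state();           /* dump original var MTRRs */

            /* ... build, merge and sort the RAM range list ... */

            /* user-pinned sizes first, else search all combinations: */
            mtrr_calc_range_state(chunk_size, gran_size,
                                  extra_remove_base, extra_remove_size, i);
            mtrr_print_out_one_result(i);

            index_good = mtrr_search_optimal_index();
            /* ... apply the winner via set_var_mtrr_all(address_bits) ... */
    }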

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Yinghai Lu 2008-10-14 18:59:18 -07:00 committed by Ingo Molnar
parent 49fdf6785f
commit 30604bb410

@@ -803,6 +803,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
}
static struct res_range __initdata range[RANGE_NUM];
static int __initdata nr_range;
#ifdef CONFIG_MTRR_SANITIZER
@@ -1206,40 +1207,44 @@ struct mtrr_cleanup_result {
#define PSHIFT (PAGE_SHIFT - 10)
static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
static struct res_range __initdata range_new[RANGE_NUM];
static unsigned long __initdata min_loss_pfn[RANGE_NUM];
static int __init mtrr_cleanup(unsigned address_bits)
static void __init print_out_mtrr_range_state(void)
{
unsigned long extra_remove_base, extra_remove_size;
unsigned long base, size, def, dummy;
mtrr_type type;
int nr_range, nr_range_new;
u64 chunk_size, gran_size;
unsigned long range_sums, range_sums_new;
int index_good;
int num_reg_good;
int i;
char start_factor = 'K', size_factor = 'K';
unsigned long start_base, size_base;
mtrr_type type;
for (i = 0; i < num_var_ranges; i++) {
size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
if (!size_base)
continue;
size_base = to_size_factor(size_base, &size_factor),
start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
start_base = to_size_factor(start_base, &start_factor),
type = range_state[i].type;
printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
i, start_base, start_factor,
size_base, size_factor,
(type == MTRR_TYPE_UNCACHABLE) ? "UC" :
((type == MTRR_TYPE_WRPROT) ? "WP" :
((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
);
}
}
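
print_out_mtrr_range_state() leans on to_size_factor(), which is not part of this diff; a minimal sketch of the contract the printing code assumes (scale a KiB count to the largest whole K/M/G unit and return the suffix through *factorp):

    /* Sketch only: the real to_size_factor() lives elsewhere in
     * mtrr/main.c.  Pick K/M/G based on the alignment of sizek. */
    static unsigned long to_size_factor(unsigned long sizek, char *factorp)
    {
            unsigned long base = sizek;
            char factor;

            if (base & ((1 << 10) - 1)) {           /* not MiB aligned */
                    factor = 'K';
            } else if (base & ((1 << 20) - 1)) {    /* not GiB aligned */
                    factor = 'M';
                    base >>= 10;
            } else {
                    factor = 'G';
                    base >>= 20;
            }
            *factorp = factor;

            return base;
    }

With that, a 2097152 KiB (2 GiB) range prints as "2GB" and a 524288 KiB range as "512MB".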
static int __init mtrr_need_cleanup(void)
{
int i;
mtrr_type type;
unsigned long size;
/* extra one for all 0 */
int num[MTRR_NUM_TYPES + 1];
if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
return 0;
rdmsr(MTRRdefType_MSR, def, dummy);
def &= 0xff;
if (def != MTRR_TYPE_UNCACHABLE)
return 0;
/* get it and store it aside */
memset(range_state, 0, sizeof(range_state));
for (i = 0; i < num_var_ranges; i++) {
mtrr_if->get(i, &base, &size, &type);
range_state[i].base_pfn = base;
range_state[i].size_pfn = size;
range_state[i].type = type;
}
/* check entries number */
memset(num, 0, sizeof(num));
for (i = 0; i < num_var_ranges; i++) {
@@ -1263,124 +1268,19 @@ static int __init mtrr_cleanup(unsigned address_bits)
num_var_ranges - num[MTRR_NUM_TYPES])
return 0;
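
num[] has one slot per MTRR type plus a final num[MTRR_NUM_TYPES] slot for all-zero (unused) entries, the "extra one for all 0" above. The body of the counting loop falls inside the region elided by the hunk boundary; a hypothetical illustration of the bookkeeping it is expected to do:

    /* Hypothetical sketch of the elided counting loop: classify each
     * variable range by type, counting empty entries in the extra slot. */
    for (i = 0; i < num_var_ranges; i++) {
            type = range_state[i].type;
            size = range_state[i].size_pfn;
            if (!size)
                    type = MTRR_NUM_TYPES;          /* unused register */
            num[type]++;
    }

The surviving tail of the check then returns 0, meaning no cleanup, unless the populated registers form a mix worth rewriting.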
/* print original var MTRRs at first, for debugging: */
printk(KERN_DEBUG "original variable MTRRs\n");
for (i = 0; i < num_var_ranges; i++) {
char start_factor = 'K', size_factor = 'K';
unsigned long start_base, size_base;
size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
if (!size_base)
continue;
size_base = to_size_factor(size_base, &size_factor),
start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
start_base = to_size_factor(start_base, &start_factor),
type = range_state[i].type;
printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
i, start_base, start_factor,
size_base, size_factor,
(type == MTRR_TYPE_UNCACHABLE) ? "UC" :
((type == MTRR_TYPE_WRPROT) ? "WP" :
((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
);
}
memset(range, 0, sizeof(range));
extra_remove_size = 0;
extra_remove_base = 1 << (32 - PAGE_SHIFT);
if (mtrr_tom2)
extra_remove_size =
(mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
extra_remove_size);
/*
* [0, 1M) should always be covered by var mtrr with WB
* and fixed mtrrs should take effect before var mtrr for it
*/
nr_range = add_range_with_merge(range, nr_range, 0,
(1ULL<<(20 - PAGE_SHIFT)) - 1);
/* sort the ranges */
sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
range_sums = sum_ranges(range, nr_range);
printk(KERN_INFO "total RAM covered: %ldM\n",
range_sums >> (20 - PAGE_SHIFT));
if (mtrr_chunk_size && mtrr_gran_size) {
int num_reg;
char gran_factor, chunk_factor, lose_factor;
unsigned long gran_base, chunk_base, lose_base;
debug_print++;
/* convert ranges to var ranges state */
num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size,
mtrr_gran_size);
/* we got new setting in range_state, check it */
memset(range_new, 0, sizeof(range_new));
nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
extra_remove_base,
extra_remove_size);
range_sums_new = sum_ranges(range_new, nr_range_new);
i = 0;
result[i].chunk_sizek = mtrr_chunk_size >> 10;
result[i].gran_sizek = mtrr_gran_size >> 10;
result[i].num_reg = num_reg;
if (range_sums < range_sums_new) {
result[i].lose_cover_sizek =
(range_sums_new - range_sums) << PSHIFT;
result[i].bad = 1;
} else
result[i].lose_cover_sizek =
(range_sums - range_sums_new) << PSHIFT;
gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
result[i].bad?"*BAD*":" ",
gran_base, gran_factor, chunk_base, chunk_factor);
printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
result[i].num_reg, result[i].bad?"-":"",
lose_base, lose_factor);
if (!result[i].bad) {
set_var_mtrr_all(address_bits);
return 1;
}
printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
"will find optimal one\n");
debug_print--;
memset(result, 0, sizeof(result[0]));
}
}
i = 0;
memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
memset(result, 0, sizeof(result));
for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {
char gran_factor;
unsigned long gran_base;
if (debug_print)
gran_base = to_size_factor(gran_size >> 10, &gran_factor);
for (chunk_size = gran_size; chunk_size < (1ULL<<32);
chunk_size <<= 1) {
static unsigned long __initdata range_sums;
static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
unsigned long extra_remove_base,
unsigned long extra_remove_size,
int i)
{
int num_reg;
if (debug_print) {
char chunk_factor;
unsigned long chunk_base;
chunk_base = to_size_factor(chunk_size>>10, &chunk_factor),
printk(KERN_INFO "\n");
printk(KERN_INFO "gran_size: %ld%c chunk_size: %ld%c \n",
gran_base, gran_factor, chunk_base, chunk_factor);
}
if (i >= NUM_RESULT)
continue;
static struct res_range range_new[RANGE_NUM];
static int nr_range_new;
unsigned long range_sums_new;
/* convert ranges to var ranges state */
num_reg = x86_setup_var_mtrrs(range, nr_range,
@@ -1415,12 +1315,10 @@ static int __init mtrr_cleanup(unsigned address_bits)
min_loss_pfn[num_reg] =
range_sums - range_sums_new;
}
i++;
}
}
}
/* print out all */
for (i = 0; i < NUM_RESULT; i++) {
static void __init mtrr_print_out_one_result(int i)
{
char gran_factor, chunk_factor, lose_factor;
unsigned long gran_base, chunk_base, lose_base;
@@ -1428,14 +1326,19 @@ static int __init mtrr_cleanup(unsigned address_bits)
chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
result[i].bad?"*BAD*":" ",
result[i].bad ? "*BAD*" : " ",
gran_base, gran_factor, chunk_base, chunk_factor);
printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
result[i].num_reg, result[i].bad?"-":"",
result[i].num_reg, result[i].bad ? "-" : "",
lose_base, lose_factor);
}
}
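
Given the two format strings above, each result occupies one line of output; for example (values hypothetical):

     gran_size: 64K     chunk_size: 64K     num_reg: 6      lose cover RAM: 0G
    *BAD*gran_size: 64K     chunk_size: 4M      num_reg: 5      lose cover RAM: -25M

The leading "*BAD*" and the "-" sign mark combinations whose new layout would cover more address space than the original one (per the range_sums < range_sums_new test earlier in the patch).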
static int __init mtrr_search_optimal_index(void)
{
int i;
int num_reg_good;
int index_good;
/* try to find the optimal index */
if (nr_mtrr_spare_reg >= num_var_ranges)
nr_mtrr_spare_reg = num_var_ranges - 1;
num_reg_good = -1;
@@ -1456,29 +1359,123 @@ static int __init mtrr_cleanup(unsigned address_bits)
}
}
if (index_good != -1) {
char gran_factor, chunk_factor, lose_factor;
unsigned long gran_base, chunk_base, lose_base;
return index_good;
}
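
min_loss_pfn[] is indexed by register count: slot n records the smallest coverage loss seen among all (gran, chunk) combinations that compiled down to n variable registers. The search loop sits in the region elided by the hunk boundary above; a hypothetical illustration of the selection policy (keep nr_mtrr_spare_reg registers free, prefer the fewest registers with zero loss):

    /* Hypothetical sketch of the elided search: within the register
     * budget, the smallest count with no lost coverage wins. */
    num_reg_good = -1;
    for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
            if (!min_loss_pfn[i])
                    num_reg_good = i;
    }

index_good is then the slot in result[] that matches the chosen register count, or -1 when no combination qualified, which is what the caller checks.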
static int __init mtrr_cleanup(unsigned address_bits)
{
unsigned long extra_remove_base, extra_remove_size;
unsigned long base, size, def, dummy;
mtrr_type type;
u64 chunk_size, gran_size;
int index_good;
int i;
if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
return 0;
rdmsr(MTRRdefType_MSR, def, dummy);
def &= 0xff;
if (def != MTRR_TYPE_UNCACHABLE)
return 0;
/* get it and store it aside */
memset(range_state, 0, sizeof(range_state));
for (i = 0; i < num_var_ranges; i++) {
mtrr_if->get(i, &base, &size, &type);
range_state[i].base_pfn = base;
range_state[i].size_pfn = size;
range_state[i].type = type;
}
/* check if we need handle it and can handle it */
if (!mtrr_need_cleanup())
return 0;
/* print original var MTRRs at first, for debugging: */
printk(KERN_DEBUG "original variable MTRRs\n");
print_out_mtrr_range_state();
memset(range, 0, sizeof(range));
extra_remove_size = 0;
extra_remove_base = 1 << (32 - PAGE_SHIFT);
if (mtrr_tom2)
extra_remove_size =
(mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
extra_remove_size);
/*
* [0, 1M) should always be covered by var mtrr with WB
* and fixed mtrrs should take effect before var mtrr for it
*/
nr_range = add_range_with_merge(range, nr_range, 0,
(1ULL<<(20 - PAGE_SHIFT)) - 1);
/* sort the ranges */
sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
range_sums = sum_ranges(range, nr_range);
printk(KERN_INFO "total RAM covered: %ldM\n",
range_sums >> (20 - PAGE_SHIFT));
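
range_sums is a page-frame count. sum_ranges() is defined earlier in the file and not part of this diff, but given the inclusive [start, end] ranges used here (note the "- 1" when [0, 1M) is added above), its assumed behavior is simply:

    /* Sketch only: the real sum_ranges() lives elsewhere in
     * mtrr/main.c.  Total PFNs across inclusive [start, end] ranges. */
    static unsigned long __init sum_ranges(struct res_range *range,
                                           int nr_range)
    {
            unsigned long sum = 0;
            int i;

            for (i = 0; i < nr_range; i++)
                    sum += range[i].end + 1 - range[i].start;

            return sum;
    }

The >> (20 - PAGE_SHIFT) in the printk then turns that PFN count into megabytes.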
if (mtrr_chunk_size && mtrr_gran_size) {
i = 0;
mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size,
extra_remove_base, extra_remove_size, i);
mtrr_print_out_one_result(i);
if (!result[i].bad) {
set_var_mtrr_all(address_bits);
return 1;
}
printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
"will find optimal one\n");
}
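
This branch covers the case where both sizes were pinned on the kernel command line via the mtrr_chunk_size= and mtrr_gran_size= boot options, e.g.:

    mtrr_gran_size=64M mtrr_chunk_size=128M

If the pinned combination loses no coverage it is applied immediately and the search below is skipped; otherwise the kernel falls through and hunts for an optimal pair itself.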
i = 0;
memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
memset(result, 0, sizeof(result));
for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {
for (chunk_size = gran_size; chunk_size < (1ULL<<32);
chunk_size <<= 1) {
if (i >= NUM_RESULT)
continue;
mtrr_calc_range_state(chunk_size, gran_size,
extra_remove_base, extra_remove_size, i);
if (debug_print) {
mtrr_print_out_one_result(i);
printk(KERN_INFO "\n");
}
i++;
}
}
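
gran_size walks the 16 powers of two from 1<<16 (64K) up to 1<<31 (2G), and chunk_size walks from gran_size up to 2G, so the nested loops visit at most 16 + 15 + ... + 1 = 136 (gran, chunk) combinations; NUM_RESULT (defined outside this excerpt) is presumably sized to match, which is why the i >= NUM_RESULT guard should rarely, if ever, fire.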
/* try to find the optimal index */
index_good = mtrr_search_optimal_index();
if (index_good != -1) {
printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
i = index_good;
gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
printk(KERN_INFO "gran_size: %ld%c \tchunk_size: %ld%c \t",
gran_base, gran_factor, chunk_base, chunk_factor);
printk(KERN_CONT "num_reg: %d \tlose RAM: %ld%c\n",
result[i].num_reg, lose_base, lose_factor);
mtrr_print_out_one_result(i);
/* convert ranges to var ranges state */
chunk_size = result[i].chunk_sizek;
chunk_size <<= 10;
gran_size = result[i].gran_sizek;
gran_size <<= 10;
debug_print++;
x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
debug_print--;
set_var_mtrr_all(address_bits);
printk(KERN_DEBUG "New variable MTRRs\n");
print_out_mtrr_range_state();
return 1;
} else {
/* print out all */
for (i = 0; i < NUM_RESULT; i++)
mtrr_print_out_one_result(i);
}
printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n");
@@ -1562,7 +1559,6 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
{
unsigned long i, base, size, highest_pfn = 0, def, dummy;
mtrr_type type;
int nr_range;
u64 total_trim_size;
/* extra one for all 0 */