drm: move radeon_fixed.h to shared drm_fixed.h header
Will also be used by the nouveau driver in the near future.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 15a7df8db8
commit 68adac5e49
9 changed files with 398 additions and 398 deletions
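The renames in the diff below are purely mechanical: every `rfixed_*` helper from the old radeon-private `radeon_fixed.h` becomes the corresponding `dfixed_*` helper in the shared header, and the `fixed20_12` type keeps its meaning (a 20.12 fixed-point value stored in `.full`). The new header itself is not shown in this excerpt, so the sketch below is only an illustration of the convention behind these helpers, written from the visible usage and general knowledge of `drm_fixed.h`; treat the exact macro bodies as assumptions rather than a copy of the committed file.

```c
/* Illustrative sketch of the shared 20.12 fixed-point helpers (assumed, not copied from this commit). */
#include <linux/types.h>

typedef union dfixed {
	u32 full;                                   /* 20 integer bits . 12 fractional bits */
} fixed20_12;

#define dfixed_const(A)       (u32)((A) << 12)           /* was rfixed_const()      */
#define dfixed_const_half(A)  (u32)(((A) << 12) + 2048)  /* was rfixed_const_half() */
#define dfixed_init(A)        { .full = dfixed_const((A)) }
#define dfixed_trunc(A)       ((A).full >> 12)           /* was rfixed_trunc()      */
#define dfixed_mul(A, B)      ((u64)((u64)(A).full * (B).full + 2048) >> 12)

/* was rfixed_div(): rescale the dividend so the quotient stays in 20.12 form */
static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
{
	u64 tmp = ((u64)A.full << 12) + (B.full / 2);   /* round to nearest */

	return (u32)div64_u64(tmp, B.full);
}
```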
@@ -26,7 +26,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
 #include "radeon.h"
 #include "atom.h"
 #include "atom-bits.h"
@@ -2686,53 +2686,53 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
 fixed20_12 memtcas_ff[8] = {
-fixed_init(1),
-fixed_init(2),
-fixed_init(3),
-fixed_init(0),
-fixed_init_half(1),
-fixed_init_half(2),
-fixed_init(0),
+dfixed_init(1),
+dfixed_init(2),
+dfixed_init(3),
+dfixed_init(0),
+dfixed_init_half(1),
+dfixed_init_half(2),
+dfixed_init(0),
 };
 fixed20_12 memtcas_rs480_ff[8] = {
-fixed_init(0),
-fixed_init(1),
-fixed_init(2),
-fixed_init(3),
-fixed_init(0),
-fixed_init_half(1),
-fixed_init_half(2),
-fixed_init_half(3),
+dfixed_init(0),
+dfixed_init(1),
+dfixed_init(2),
+dfixed_init(3),
+dfixed_init(0),
+dfixed_init_half(1),
+dfixed_init_half(2),
+dfixed_init_half(3),
 };
 fixed20_12 memtcas2_ff[8] = {
-fixed_init(0),
-fixed_init(1),
-fixed_init(2),
-fixed_init(3),
-fixed_init(4),
-fixed_init(5),
-fixed_init(6),
-fixed_init(7),
+dfixed_init(0),
+dfixed_init(1),
+dfixed_init(2),
+dfixed_init(3),
+dfixed_init(4),
+dfixed_init(5),
+dfixed_init(6),
+dfixed_init(7),
 };
 fixed20_12 memtrbs[8] = {
-fixed_init(1),
-fixed_init_half(1),
-fixed_init(2),
-fixed_init_half(2),
-fixed_init(3),
-fixed_init_half(3),
-fixed_init(4),
-fixed_init_half(4)
+dfixed_init(1),
+dfixed_init_half(1),
+dfixed_init(2),
+dfixed_init_half(2),
+dfixed_init(3),
+dfixed_init_half(3),
+dfixed_init(4),
+dfixed_init_half(4)
 };
 fixed20_12 memtrbs_r4xx[8] = {
-fixed_init(4),
-fixed_init(5),
-fixed_init(6),
-fixed_init(7),
-fixed_init(8),
-fixed_init(9),
-fixed_init(10),
-fixed_init(11)
+dfixed_init(4),
+dfixed_init(5),
+dfixed_init(6),
+dfixed_init(7),
+dfixed_init(8),
+dfixed_init(9),
+dfixed_init(10),
+dfixed_init(11)
 };
 fixed20_12 min_mem_eff;
 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
@@ -2763,7 +2763,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 }
 }

-min_mem_eff.full = rfixed_const_8(0);
+min_mem_eff.full = dfixed_const_8(0);
 /* get modes */
 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
@@ -2784,28 +2784,28 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 mclk_ff = rdev->pm.mclk;

 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
-temp_ff.full = rfixed_const(temp);
-mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
+temp_ff.full = dfixed_const(temp);
+mem_bw.full = dfixed_mul(mclk_ff, temp_ff);

 pix_clk.full = 0;
 pix_clk2.full = 0;
 peak_disp_bw.full = 0;
 if (mode1) {
-temp_ff.full = rfixed_const(1000);
-pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
-pix_clk.full = rfixed_div(pix_clk, temp_ff);
-temp_ff.full = rfixed_const(pixel_bytes1);
-peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+temp_ff.full = dfixed_const(1000);
+pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
+pix_clk.full = dfixed_div(pix_clk, temp_ff);
+temp_ff.full = dfixed_const(pixel_bytes1);
+peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
 }
 if (mode2) {
-temp_ff.full = rfixed_const(1000);
-pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
-pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
-temp_ff.full = rfixed_const(pixel_bytes2);
-peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+temp_ff.full = dfixed_const(1000);
+pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
+pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
+temp_ff.full = dfixed_const(pixel_bytes2);
+peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
 }

-mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
 if (peak_disp_bw.full >= mem_bw.full) {
 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
@@ -2847,9 +2847,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 mem_tras = ((temp >> 12) & 0xf) + 4;
 }
 /* convert to FF */
-trcd_ff.full = rfixed_const(mem_trcd);
-trp_ff.full = rfixed_const(mem_trp);
-tras_ff.full = rfixed_const(mem_tras);
+trcd_ff.full = dfixed_const(mem_trcd);
+trp_ff.full = dfixed_const(mem_trp);
+tras_ff.full = dfixed_const(mem_tras);

 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */
 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
@@ -2867,7 +2867,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 /* extra cas latency stored in bits 23-25 0-4 clocks */
 data = (temp >> 23) & 0x7;
 if (data < 5)
-tcas_ff.full += rfixed_const(data);
+tcas_ff.full += dfixed_const(data);
 }

 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
@@ -2904,72 +2904,72 @@ void r100_bandwidth_update(struct radeon_device *rdev)

 if (rdev->flags & RADEON_IS_AGP) {
 fixed20_12 agpmode_ff;
-agpmode_ff.full = rfixed_const(radeon_agpmode);
-temp_ff.full = rfixed_const_666(16);
-sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
+agpmode_ff.full = dfixed_const(radeon_agpmode);
+temp_ff.full = dfixed_const_666(16);
+sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
 }
 /* TODO PCIE lanes may affect this - agpmode == 16?? */

 if (ASIC_IS_R300(rdev)) {
-sclk_delay_ff.full = rfixed_const(250);
+sclk_delay_ff.full = dfixed_const(250);
 } else {
 if ((rdev->family == CHIP_RV100) ||
 rdev->flags & RADEON_IS_IGP) {
 if (rdev->mc.vram_is_ddr)
-sclk_delay_ff.full = rfixed_const(41);
+sclk_delay_ff.full = dfixed_const(41);
 else
-sclk_delay_ff.full = rfixed_const(33);
+sclk_delay_ff.full = dfixed_const(33);
 } else {
 if (rdev->mc.vram_width == 128)
-sclk_delay_ff.full = rfixed_const(57);
+sclk_delay_ff.full = dfixed_const(57);
 else
-sclk_delay_ff.full = rfixed_const(41);
+sclk_delay_ff.full = dfixed_const(41);
 }
 }

-mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
+mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);

 if (rdev->mc.vram_is_ddr) {
 if (rdev->mc.vram_width == 32) {
-k1.full = rfixed_const(40);
+k1.full = dfixed_const(40);
 c = 3;
 } else {
-k1.full = rfixed_const(20);
+k1.full = dfixed_const(20);
 c = 1;
 }
 } else {
-k1.full = rfixed_const(40);
+k1.full = dfixed_const(40);
 c = 3;
 }

-temp_ff.full = rfixed_const(2);
-mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
-temp_ff.full = rfixed_const(c);
-mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
-temp_ff.full = rfixed_const(4);
-mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
-mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
+temp_ff.full = dfixed_const(2);
+mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
+temp_ff.full = dfixed_const(c);
+mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
+temp_ff.full = dfixed_const(4);
+mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
+mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
 mc_latency_mclk.full += k1.full;

-mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
-mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
+mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
+mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);

 /*
 HW cursor time assuming worst case of full size colour cursor.
 */
-temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
 temp_ff.full += trcd_ff.full;
 if (temp_ff.full < tras_ff.full)
 temp_ff.full = tras_ff.full;
-cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
+cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);

-temp_ff.full = rfixed_const(cur_size);
-cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
+temp_ff.full = dfixed_const(cur_size);
+cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
 /*
 Find the total latency for the display data.
 */
-disp_latency_overhead.full = rfixed_const(8);
-disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
+disp_latency_overhead.full = dfixed_const(8);
+disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;

@@ -2997,16 +2997,16 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 /*
 Find the drain rate of the display buffer.
 */
-temp_ff.full = rfixed_const((16/pixel_bytes1));
-disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
+temp_ff.full = dfixed_const((16/pixel_bytes1));
+disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);

 /*
 Find the critical point of the display buffer.
 */
-crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
-crit_point_ff.full += rfixed_const_half(0);
+crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
+crit_point_ff.full += dfixed_const_half(0);

-critical_point = rfixed_trunc(crit_point_ff);
+critical_point = dfixed_trunc(crit_point_ff);

 if (rdev->disp_priority == 2) {
 critical_point = 0;
@@ -3077,8 +3077,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 /*
 Find the drain rate of the display buffer.
 */
-temp_ff.full = rfixed_const((16/pixel_bytes2));
-disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
+temp_ff.full = dfixed_const((16/pixel_bytes2));
+disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);

 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
@@ -3099,8 +3099,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 critical_point2 = 0;
 else {
 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
-temp_ff.full = rfixed_const(temp);
-temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
+temp_ff.full = dfixed_const(temp);
+temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
 if (sclk_ff.full < temp_ff.full)
 temp_ff.full = sclk_ff.full;

@@ -3108,15 +3108,15 @@ void r100_bandwidth_update(struct radeon_device *rdev)

 if (mode1) {
 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
-time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
+time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
 } else {
 time_disp1_drop_priority.full = 0;
 }
 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
-crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
-crit_point_ff.full += rfixed_const_half(0);
+crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
+crit_point_ff.full += dfixed_const_half(0);

-critical_point2 = rfixed_trunc(crit_point_ff);
+critical_point2 = dfixed_trunc(crit_point_ff);

 if (rdev->disp_priority == 2) {
 critical_point2 = 0;
@@ -299,24 +299,24 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev)
 sclk = radeon_get_engine_clock(rdev);
 mclk = rdev->clock.default_mclk;

-a.full = rfixed_const(100);
-rdev->pm.sclk.full = rfixed_const(sclk);
-rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-rdev->pm.mclk.full = rfixed_const(mclk);
-rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+a.full = dfixed_const(100);
+rdev->pm.sclk.full = dfixed_const(sclk);
+rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+rdev->pm.mclk.full = dfixed_const(mclk);
+rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

-a.full = rfixed_const(16);
+a.full = dfixed_const(16);
 /* core_bandwidth = sclk(Mhz) * 16 */
-rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
+rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
 } else {
 sclk = radeon_get_engine_clock(rdev);
 mclk = radeon_get_memory_clock(rdev);

-a.full = rfixed_const(100);
-rdev->pm.sclk.full = rfixed_const(sclk);
-rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-rdev->pm.mclk.full = rfixed_const(mclk);
-rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+a.full = dfixed_const(100);
+rdev->pm.sclk.full = dfixed_const(sclk);
+rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+rdev->pm.mclk.full = dfixed_const(mclk);
+rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 }
 }

@@ -633,37 +633,37 @@ calc_fb_div(struct radeon_pll *pll,

 vco_freq = freq * post_div;
 /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
-a.full = rfixed_const(pll->reference_freq);
-feedback_divider.full = rfixed_const(vco_freq);
-feedback_divider.full = rfixed_div(feedback_divider, a);
-a.full = rfixed_const(ref_div);
-feedback_divider.full = rfixed_mul(feedback_divider, a);
+a.full = dfixed_const(pll->reference_freq);
+feedback_divider.full = dfixed_const(vco_freq);
+feedback_divider.full = dfixed_div(feedback_divider, a);
+a.full = dfixed_const(ref_div);
+feedback_divider.full = dfixed_mul(feedback_divider, a);

 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
 /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
-a.full = rfixed_const(10);
-feedback_divider.full = rfixed_mul(feedback_divider, a);
-feedback_divider.full += rfixed_const_half(0);
-feedback_divider.full = rfixed_floor(feedback_divider);
-feedback_divider.full = rfixed_div(feedback_divider, a);
+a.full = dfixed_const(10);
+feedback_divider.full = dfixed_mul(feedback_divider, a);
+feedback_divider.full += dfixed_const_half(0);
+feedback_divider.full = dfixed_floor(feedback_divider);
+feedback_divider.full = dfixed_div(feedback_divider, a);

 /* *fb_div = floor(feedback_divider); */
-a.full = rfixed_floor(feedback_divider);
-*fb_div = rfixed_trunc(a);
+a.full = dfixed_floor(feedback_divider);
+*fb_div = dfixed_trunc(a);
 /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
-a.full = rfixed_const(10);
-b.full = rfixed_mul(feedback_divider, a);
+a.full = dfixed_const(10);
+b.full = dfixed_mul(feedback_divider, a);

-feedback_divider.full = rfixed_floor(feedback_divider);
-feedback_divider.full = rfixed_mul(feedback_divider, a);
+feedback_divider.full = dfixed_floor(feedback_divider);
+feedback_divider.full = dfixed_mul(feedback_divider, a);
 feedback_divider.full = b.full - feedback_divider.full;
-*fb_div_frac = rfixed_trunc(feedback_divider);
+*fb_div_frac = dfixed_trunc(feedback_divider);
 } else {
 /* *fb_div = floor(feedback_divider + 0.5); */
-feedback_divider.full += rfixed_const_half(0);
-feedback_divider.full = rfixed_floor(feedback_divider);
+feedback_divider.full += dfixed_const_half(0);
+feedback_divider.full = dfixed_floor(feedback_divider);

-*fb_div = rfixed_trunc(feedback_divider);
+*fb_div = dfixed_trunc(feedback_divider);
 *fb_div_frac = 0;
 }

@@ -693,10 +693,10 @@ calc_fb_ref_div(struct radeon_pll *pll,
 pll_out_max = pll->pll_out_max;
 }

-ffreq.full = rfixed_const(freq);
+ffreq.full = dfixed_const(freq);
 /* max_error = ffreq * 0.0025; */
-a.full = rfixed_const(400);
-max_error.full = rfixed_div(ffreq, a);
+a.full = dfixed_const(400);
+max_error.full = dfixed_div(ffreq, a);

 for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
 if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
@@ -707,9 +707,9 @@ calc_fb_ref_div(struct radeon_pll *pll,
 continue;

 /* pll_out = vco / post_div; */
-a.full = rfixed_const(post_div);
-pll_out.full = rfixed_const(vco);
-pll_out.full = rfixed_div(pll_out, a);
+a.full = dfixed_const(post_div);
+pll_out.full = dfixed_const(vco);
+pll_out.full = dfixed_div(pll_out, a);

 if (pll_out.full >= ffreq.full) {
 error.full = pll_out.full - ffreq.full;
@@ -1099,15 +1099,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 }
 if (radeon_crtc->rmx_type != RMX_OFF) {
 fixed20_12 a, b;
-a.full = rfixed_const(crtc->mode.vdisplay);
-b.full = rfixed_const(radeon_crtc->native_mode.hdisplay);
-radeon_crtc->vsc.full = rfixed_div(a, b);
-a.full = rfixed_const(crtc->mode.hdisplay);
-b.full = rfixed_const(radeon_crtc->native_mode.vdisplay);
-radeon_crtc->hsc.full = rfixed_div(a, b);
+a.full = dfixed_const(crtc->mode.vdisplay);
+b.full = dfixed_const(radeon_crtc->native_mode.hdisplay);
+radeon_crtc->vsc.full = dfixed_div(a, b);
+a.full = dfixed_const(crtc->mode.hdisplay);
+b.full = dfixed_const(radeon_crtc->native_mode.vdisplay);
+radeon_crtc->hsc.full = dfixed_div(a, b);
 } else {
-radeon_crtc->vsc.full = rfixed_const(1);
-radeon_crtc->hsc.full = rfixed_const(1);
+radeon_crtc->vsc.full = dfixed_const(1);
+radeon_crtc->hsc.full = dfixed_const(1);
 }
 return true;
 }
@@ -26,7 +26,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
 #include "radeon.h"
 #include "atom.h"

@@ -34,10 +34,10 @@
 #include <drm_mode.h>
 #include <drm_edid.h>
 #include <drm_dp_helper.h>
+#include <drm_fixed.h>
 #include <linux/i2c.h>
 #include <linux/i2c-id.h>
 #include <linux/i2c-algo-bit.h>
-#include "radeon_fixed.h"

 struct radeon_bo;
 struct radeon_device;
@ -76,59 +76,59 @@ void rs690_pm_info(struct radeon_device *rdev)
|
||||||
/* Get various system informations from bios */
|
/* Get various system informations from bios */
|
||||||
switch (crev) {
|
switch (crev) {
|
||||||
case 1:
|
case 1:
|
||||||
tmp.full = rfixed_const(100);
|
tmp.full = dfixed_const(100);
|
||||||
rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock);
|
rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
|
||||||
rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
|
rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
|
||||||
rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
|
rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
|
||||||
rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock));
|
rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
|
||||||
rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth);
|
rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
|
||||||
break;
|
break;
|
||||||
case 2:
|
case 2:
|
||||||
tmp.full = rfixed_const(100);
|
tmp.full = dfixed_const(100);
|
||||||
rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock);
|
rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
|
||||||
rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
|
rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
|
||||||
rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock);
|
rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
|
||||||
rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
|
rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
|
||||||
rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq);
|
rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
|
||||||
rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
|
rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
|
||||||
rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
|
rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
tmp.full = rfixed_const(100);
|
tmp.full = dfixed_const(100);
|
||||||
/* We assume the slower possible clock ie worst case */
|
/* We assume the slower possible clock ie worst case */
|
||||||
/* DDR 333Mhz */
|
/* DDR 333Mhz */
|
||||||
rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
|
rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
|
||||||
/* FIXME: system clock ? */
|
/* FIXME: system clock ? */
|
||||||
rdev->pm.igp_system_mclk.full = rfixed_const(100);
|
rdev->pm.igp_system_mclk.full = dfixed_const(100);
|
||||||
rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
|
rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
|
||||||
rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
|
rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
|
||||||
rdev->pm.igp_ht_link_width.full = rfixed_const(8);
|
rdev->pm.igp_ht_link_width.full = dfixed_const(8);
|
||||||
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
|
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
tmp.full = rfixed_const(100);
|
tmp.full = dfixed_const(100);
|
||||||
/* We assume the slower possible clock ie worst case */
|
/* We assume the slower possible clock ie worst case */
|
||||||
/* DDR 333Mhz */
|
/* DDR 333Mhz */
|
||||||
rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
|
rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
|
||||||
/* FIXME: system clock ? */
|
/* FIXME: system clock ? */
|
||||||
rdev->pm.igp_system_mclk.full = rfixed_const(100);
|
rdev->pm.igp_system_mclk.full = dfixed_const(100);
|
||||||
rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
|
rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
|
||||||
rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
|
rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
|
||||||
rdev->pm.igp_ht_link_width.full = rfixed_const(8);
|
rdev->pm.igp_ht_link_width.full = dfixed_const(8);
|
||||||
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
|
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
|
||||||
}
|
}
|
||||||
/* Compute various bandwidth */
|
/* Compute various bandwidth */
|
||||||
/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
|
/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
|
||||||
tmp.full = rfixed_const(4);
|
tmp.full = dfixed_const(4);
|
||||||
rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
|
rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
|
||||||
/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
|
/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
|
||||||
* = ht_clk * ht_width / 5
|
* = ht_clk * ht_width / 5
|
||||||
*/
|
*/
|
||||||
tmp.full = rfixed_const(5);
|
tmp.full = dfixed_const(5);
|
||||||
rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
|
rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
|
||||||
rdev->pm.igp_ht_link_width);
|
rdev->pm.igp_ht_link_width);
|
||||||
rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
|
rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
|
||||||
if (tmp.full < rdev->pm.max_bandwidth.full) {
|
if (tmp.full < rdev->pm.max_bandwidth.full) {
|
||||||
/* HT link is a limiting factor */
|
/* HT link is a limiting factor */
|
||||||
rdev->pm.max_bandwidth.full = tmp.full;
|
rdev->pm.max_bandwidth.full = tmp.full;
|
||||||
|
@ -136,10 +136,10 @@ void rs690_pm_info(struct radeon_device *rdev)
|
||||||
/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
|
/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
|
||||||
* = (sideport_clk * 14) / 10
|
* = (sideport_clk * 14) / 10
|
||||||
*/
|
*/
|
||||||
tmp.full = rfixed_const(14);
|
tmp.full = dfixed_const(14);
|
||||||
rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
|
rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
|
||||||
tmp.full = rfixed_const(10);
|
tmp.full = dfixed_const(10);
|
||||||
rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
|
rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
|
||||||
}
|
}
|
||||||
|
|
||||||
void rs690_mc_init(struct radeon_device *rdev)
|
void rs690_mc_init(struct radeon_device *rdev)
|
||||||
|
@ -239,20 +239,20 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (crtc->vsc.full > rfixed_const(2))
|
if (crtc->vsc.full > dfixed_const(2))
|
||||||
wm->num_line_pair.full = rfixed_const(2);
|
wm->num_line_pair.full = dfixed_const(2);
|
||||||
else
|
else
|
||||||
wm->num_line_pair.full = rfixed_const(1);
|
wm->num_line_pair.full = dfixed_const(1);
|
||||||
|
|
||||||
b.full = rfixed_const(mode->crtc_hdisplay);
|
b.full = dfixed_const(mode->crtc_hdisplay);
|
||||||
c.full = rfixed_const(256);
|
c.full = dfixed_const(256);
|
||||||
a.full = rfixed_div(b, c);
|
a.full = dfixed_div(b, c);
|
||||||
request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
|
request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
|
||||||
request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
|
request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
|
||||||
if (a.full < rfixed_const(4)) {
|
if (a.full < dfixed_const(4)) {
|
||||||
wm->lb_request_fifo_depth = 4;
|
wm->lb_request_fifo_depth = 4;
|
||||||
} else {
|
} else {
|
||||||
wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
|
wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Determine consumption rate
|
/* Determine consumption rate
|
||||||
|
@ -261,23 +261,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
|
||||||
* vsc = vertical scaling ratio, defined as source/destination
|
* vsc = vertical scaling ratio, defined as source/destination
|
||||||
* hsc = horizontal scaling ration, defined as source/destination
|
* hsc = horizontal scaling ration, defined as source/destination
|
||||||
*/
|
*/
|
||||||
a.full = rfixed_const(mode->clock);
|
a.full = dfixed_const(mode->clock);
|
||||||
b.full = rfixed_const(1000);
|
b.full = dfixed_const(1000);
|
||||||
a.full = rfixed_div(a, b);
|
a.full = dfixed_div(a, b);
|
||||||
pclk.full = rfixed_div(b, a);
|
pclk.full = dfixed_div(b, a);
|
||||||
if (crtc->rmx_type != RMX_OFF) {
|
if (crtc->rmx_type != RMX_OFF) {
|
||||||
b.full = rfixed_const(2);
|
b.full = dfixed_const(2);
|
||||||
if (crtc->vsc.full > b.full)
|
if (crtc->vsc.full > b.full)
|
||||||
b.full = crtc->vsc.full;
|
b.full = crtc->vsc.full;
|
||||||
b.full = rfixed_mul(b, crtc->hsc);
|
b.full = dfixed_mul(b, crtc->hsc);
|
||||||
c.full = rfixed_const(2);
|
c.full = dfixed_const(2);
|
||||||
b.full = rfixed_div(b, c);
|
b.full = dfixed_div(b, c);
|
||||||
consumption_time.full = rfixed_div(pclk, b);
|
consumption_time.full = dfixed_div(pclk, b);
|
||||||
} else {
|
} else {
|
||||||
consumption_time.full = pclk.full;
|
consumption_time.full = pclk.full;
|
||||||
}
|
}
|
||||||
a.full = rfixed_const(1);
|
a.full = dfixed_const(1);
|
||||||
wm->consumption_rate.full = rfixed_div(a, consumption_time);
|
wm->consumption_rate.full = dfixed_div(a, consumption_time);
|
||||||
|
|
||||||
|
|
||||||
/* Determine line time
|
/* Determine line time
|
||||||
|
@ -285,18 +285,18 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
|
||||||
* LineTime = total number of horizontal pixels
|
* LineTime = total number of horizontal pixels
|
||||||
* pclk = pixel clock period(ns)
|
* pclk = pixel clock period(ns)
|
||||||
*/
|
*/
|
||||||
a.full = rfixed_const(crtc->base.mode.crtc_htotal);
|
a.full = dfixed_const(crtc->base.mode.crtc_htotal);
|
||||||
line_time.full = rfixed_mul(a, pclk);
|
line_time.full = dfixed_mul(a, pclk);
|
||||||
|
|
||||||
/* Determine active time
|
/* Determine active time
|
||||||
* ActiveTime = time of active region of display within one line,
|
* ActiveTime = time of active region of display within one line,
|
||||||
* hactive = total number of horizontal active pixels
|
* hactive = total number of horizontal active pixels
|
||||||
* htotal = total number of horizontal pixels
|
* htotal = total number of horizontal pixels
|
||||||
*/
|
*/
|
||||||
a.full = rfixed_const(crtc->base.mode.crtc_htotal);
|
a.full = dfixed_const(crtc->base.mode.crtc_htotal);
|
||||||
b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
|
b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
|
||||||
wm->active_time.full = rfixed_mul(line_time, b);
|
wm->active_time.full = dfixed_mul(line_time, b);
|
||||||
wm->active_time.full = rfixed_div(wm->active_time, a);
|
wm->active_time.full = dfixed_div(wm->active_time, a);
|
||||||
|
|
||||||
/* Maximun bandwidth is the minimun bandwidth of all component */
|
/* Maximun bandwidth is the minimun bandwidth of all component */
|
||||||
rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
|
rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
|
||||||
|
@ -304,8 +304,8 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
|
||||||
if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
|
if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
|
||||||
rdev->pm.sideport_bandwidth.full)
|
rdev->pm.sideport_bandwidth.full)
|
||||||
rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
|
rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
|
||||||
read_delay_latency.full = rfixed_const(370 * 800 * 1000);
|
read_delay_latency.full = dfixed_const(370 * 800 * 1000);
|
||||||
read_delay_latency.full = rfixed_div(read_delay_latency,
|
read_delay_latency.full = dfixed_div(read_delay_latency,
|
||||||
rdev->pm.igp_sideport_mclk);
|
rdev->pm.igp_sideport_mclk);
|
||||||
} else {
|
} else {
|
||||||
if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
|
if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
|
||||||
|
@ -314,23 +314,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
|
||||||
if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
|
if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
|
||||||
rdev->pm.ht_bandwidth.full)
|
rdev->pm.ht_bandwidth.full)
|
||||||
rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
|
rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
|
||||||
read_delay_latency.full = rfixed_const(5000);
|
read_delay_latency.full = dfixed_const(5000);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
|
/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
|
||||||
a.full = rfixed_const(16);
|
a.full = dfixed_const(16);
|
||||||
rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
|
rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
|
||||||
a.full = rfixed_const(1000);
|
a.full = dfixed_const(1000);
|
||||||
rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
|
rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
|
||||||
/* Determine chunk time
|
/* Determine chunk time
|
||||||
* ChunkTime = the time it takes the DCP to send one chunk of data
|
* ChunkTime = the time it takes the DCP to send one chunk of data
|
||||||
* to the LB which consists of pipeline delay and inter chunk gap
|
* to the LB which consists of pipeline delay and inter chunk gap
|
||||||
* sclk = system clock(ns)
|
* sclk = system clock(ns)
|
||||||
*/
|
*/
|
||||||
a.full = rfixed_const(256 * 13);
|
a.full = dfixed_const(256 * 13);
|
||||||
chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
|
chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
|
||||||
a.full = rfixed_const(10);
|
a.full = dfixed_const(10);
|
||||||
chunk_time.full = rfixed_div(chunk_time, a);
|
chunk_time.full = dfixed_div(chunk_time, a);
|
||||||
|
|
||||||
/* Determine the worst case latency
|
/* Determine the worst case latency
|
||||||
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
|
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
|
||||||
|
@ -340,13 +340,13 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
|
||||||
* ChunkTime = time it takes the DCP to send one chunk of data to the LB
|
* ChunkTime = time it takes the DCP to send one chunk of data to the LB
|
||||||
* which consists of pipeline delay and inter chunk gap
|
* which consists of pipeline delay and inter chunk gap
|
||||||
*/
|
*/
|
||||||
if (rfixed_trunc(wm->num_line_pair) > 1) {
|
if (dfixed_trunc(wm->num_line_pair) > 1) {
|
||||||
a.full = rfixed_const(3);
|
a.full = dfixed_const(3);
|
||||||
wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
|
wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
|
||||||
wm->worst_case_latency.full += read_delay_latency.full;
|
wm->worst_case_latency.full += read_delay_latency.full;
|
||||||
} else {
|
} else {
|
||||||
a.full = rfixed_const(2);
|
a.full = dfixed_const(2);
|
||||||
wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
|
wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
|
||||||
wm->worst_case_latency.full += read_delay_latency.full;
|
wm->worst_case_latency.full += read_delay_latency.full;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -360,34 +360,34 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
|
||||||
* of data to the LB which consists of
|
* of data to the LB which consists of
|
||||||
* pipeline delay and inter chunk gap
|
* pipeline delay and inter chunk gap
|
||||||
*/
|
*/
|
||||||
if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
|
if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
|
||||||
tolerable_latency.full = line_time.full;
|
tolerable_latency.full = line_time.full;
|
||||||
} else {
|
} else {
|
||||||
tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
|
tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
|
||||||
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
|
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
|
||||||
tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
|
tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
|
||||||
tolerable_latency.full = line_time.full - tolerable_latency.full;
|
tolerable_latency.full = line_time.full - tolerable_latency.full;
|
||||||
}
|
}
|
||||||
/* We assume worst case 32bits (4 bytes) */
|
/* We assume worst case 32bits (4 bytes) */
|
||||||
wm->dbpp.full = rfixed_const(4 * 8);
|
wm->dbpp.full = dfixed_const(4 * 8);
|
||||||
|
|
||||||
/* Determine the maximum priority mark
|
/* Determine the maximum priority mark
|
||||||
* width = viewport width in pixels
|
* width = viewport width in pixels
|
||||||
*/
|
*/
|
||||||
a.full = rfixed_const(16);
|
a.full = dfixed_const(16);
|
||||||
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
|
wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
|
||||||
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
|
wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
|
||||||
wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
|
wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
|
||||||
|
|
||||||
/* Determine estimated width */
|
/* Determine estimated width */
|
||||||
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
|
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
|
||||||
estimated_width.full = rfixed_div(estimated_width, consumption_time);
|
estimated_width.full = dfixed_div(estimated_width, consumption_time);
|
||||||
if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
|
if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
|
||||||
wm->priority_mark.full = rfixed_const(10);
|
wm->priority_mark.full = dfixed_const(10);
|
||||||
} else {
|
} else {
|
||||||
a.full = rfixed_const(16);
|
a.full = dfixed_const(16);
|
||||||
wm->priority_mark.full = rfixed_div(estimated_width, a);
|
wm->priority_mark.full = dfixed_div(estimated_width, a);
|
||||||
wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
|
wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
|
||||||
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
|
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -439,58 +439,58 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
|
||||||
WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
|
WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
|
||||||
|
|
||||||
if (mode0 && mode1) {
|
if (mode0 && mode1) {
|
||||||
if (rfixed_trunc(wm0.dbpp) > 64)
|
if (dfixed_trunc(wm0.dbpp) > 64)
|
||||||
a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
|
a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
|
||||||
else
|
else
|
||||||
a.full = wm0.num_line_pair.full;
|
a.full = wm0.num_line_pair.full;
|
||||||
if (rfixed_trunc(wm1.dbpp) > 64)
|
if (dfixed_trunc(wm1.dbpp) > 64)
|
||||||
b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
|
b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
|
||||||
else
|
else
|
||||||
b.full = wm1.num_line_pair.full;
|
b.full = wm1.num_line_pair.full;
|
||||||
a.full += b.full;
|
a.full += b.full;
|
||||||
fill_rate.full = rfixed_div(wm0.sclk, a);
|
fill_rate.full = dfixed_div(wm0.sclk, a);
|
||||||
if (wm0.consumption_rate.full > fill_rate.full) {
|
if (wm0.consumption_rate.full > fill_rate.full) {
|
||||||
b.full = wm0.consumption_rate.full - fill_rate.full;
|
b.full = wm0.consumption_rate.full - fill_rate.full;
|
||||||
b.full = rfixed_mul(b, wm0.active_time);
|
b.full = dfixed_mul(b, wm0.active_time);
|
||||||
a.full = rfixed_mul(wm0.worst_case_latency,
|
a.full = dfixed_mul(wm0.worst_case_latency,
|
||||||
wm0.consumption_rate);
|
wm0.consumption_rate);
|
||||||
a.full = a.full + b.full;
|
a.full = a.full + b.full;
|
||||||
b.full = rfixed_const(16 * 1000);
|
b.full = dfixed_const(16 * 1000);
|
||||||
priority_mark02.full = rfixed_div(a, b);
|
priority_mark02.full = dfixed_div(a, b);
|
||||||
} else {
|
} else {
|
||||||
a.full = rfixed_mul(wm0.worst_case_latency,
|
a.full = dfixed_mul(wm0.worst_case_latency,
|
||||||
wm0.consumption_rate);
|
wm0.consumption_rate);
|
||||||
b.full = rfixed_const(16 * 1000);
|
b.full = dfixed_const(16 * 1000);
|
||||||
priority_mark02.full = rfixed_div(a, b);
|
priority_mark02.full = dfixed_div(a, b);
|
||||||
}
|
}
|
||||||
if (wm1.consumption_rate.full > fill_rate.full) {
|
if (wm1.consumption_rate.full > fill_rate.full) {
|
||||||
b.full = wm1.consumption_rate.full - fill_rate.full;
|
b.full = wm1.consumption_rate.full - fill_rate.full;
|
||||||
b.full = rfixed_mul(b, wm1.active_time);
|
b.full = dfixed_mul(b, wm1.active_time);
|
||||||
a.full = rfixed_mul(wm1.worst_case_latency,
|
a.full = dfixed_mul(wm1.worst_case_latency,
|
||||||
wm1.consumption_rate);
|
wm1.consumption_rate);
|
||||||
a.full = a.full + b.full;
|
a.full = a.full + b.full;
|
||||||
b.full = rfixed_const(16 * 1000);
|
b.full = dfixed_const(16 * 1000);
|
||||||
priority_mark12.full = rfixed_div(a, b);
|
priority_mark12.full = dfixed_div(a, b);
|
||||||
} else {
|
} else {
|
||||||
a.full = rfixed_mul(wm1.worst_case_latency,
|
a.full = dfixed_mul(wm1.worst_case_latency,
|
||||||
wm1.consumption_rate);
|
wm1.consumption_rate);
|
||||||
b.full = rfixed_const(16 * 1000);
|
b.full = dfixed_const(16 * 1000);
|
||||||
priority_mark12.full = rfixed_div(a, b);
|
priority_mark12.full = dfixed_div(a, b);
|
||||||
}
|
}
|
||||||
if (wm0.priority_mark.full > priority_mark02.full)
|
if (wm0.priority_mark.full > priority_mark02.full)
|
||||||
priority_mark02.full = wm0.priority_mark.full;
|
priority_mark02.full = wm0.priority_mark.full;
|
||||||
if (rfixed_trunc(priority_mark02) < 0)
|
if (dfixed_trunc(priority_mark02) < 0)
|
||||||
priority_mark02.full = 0;
|
priority_mark02.full = 0;
|
||||||
if (wm0.priority_mark_max.full > priority_mark02.full)
|
if (wm0.priority_mark_max.full > priority_mark02.full)
|
||||||
priority_mark02.full = wm0.priority_mark_max.full;
|
priority_mark02.full = wm0.priority_mark_max.full;
|
||||||
if (wm1.priority_mark.full > priority_mark12.full)
|
if (wm1.priority_mark.full > priority_mark12.full)
|
||||||
priority_mark12.full = wm1.priority_mark.full;
|
priority_mark12.full = wm1.priority_mark.full;
|
||||||
if (rfixed_trunc(priority_mark12) < 0)
|
if (dfixed_trunc(priority_mark12) < 0)
|
||||||
priority_mark12.full = 0;
|
priority_mark12.full = 0;
|
||||||
if (wm1.priority_mark_max.full > priority_mark12.full)
|
if (wm1.priority_mark_max.full > priority_mark12.full)
|
||||||
priority_mark12.full = wm1.priority_mark_max.full;
|
priority_mark12.full = wm1.priority_mark_max.full;
|
||||||
d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
|
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
|
||||||
d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
|
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
|
||||||
if (rdev->disp_priority == 2) {
|
if (rdev->disp_priority == 2) {
|
||||||
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
|
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
|
||||||
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
|
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
|
||||||
|
@ -500,32 +500,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
|
||||||
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
|
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
|
||||||
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
|
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
|
||||||
} else if (mode0) {
|
} else if (mode0) {
|
||||||
if (rfixed_trunc(wm0.dbpp) > 64)
|
if (dfixed_trunc(wm0.dbpp) > 64)
|
||||||
a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
|
a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
|
||||||
else
|
else
|
||||||
a.full = wm0.num_line_pair.full;
|
a.full = wm0.num_line_pair.full;
|
||||||
fill_rate.full = rfixed_div(wm0.sclk, a);
|
fill_rate.full = dfixed_div(wm0.sclk, a);
|
||||||
if (wm0.consumption_rate.full > fill_rate.full) {
|
if (wm0.consumption_rate.full > fill_rate.full) {
|
||||||
b.full = wm0.consumption_rate.full - fill_rate.full;
|
b.full = wm0.consumption_rate.full - fill_rate.full;
|
||||||
b.full = rfixed_mul(b, wm0.active_time);
|
b.full = dfixed_mul(b, wm0.active_time);
|
||||||
a.full = rfixed_mul(wm0.worst_case_latency,
|
a.full = dfixed_mul(wm0.worst_case_latency,
|
||||||
wm0.consumption_rate);
|
wm0.consumption_rate);
|
||||||
a.full = a.full + b.full;
|
a.full = a.full + b.full;
|
||||||
b.full = rfixed_const(16 * 1000);
|
b.full = dfixed_const(16 * 1000);
|
||||||
priority_mark02.full = rfixed_div(a, b);
|
priority_mark02.full = dfixed_div(a, b);
|
||||||
} else {
|
} else {
|
||||||
a.full = rfixed_mul(wm0.worst_case_latency,
|
a.full = dfixed_mul(wm0.worst_case_latency,
|
||||||
wm0.consumption_rate);
|
wm0.consumption_rate);
|
||||||
b.full = rfixed_const(16 * 1000);
|
b.full = dfixed_const(16 * 1000);
|
||||||
priority_mark02.full = rfixed_div(a, b);
|
priority_mark02.full = dfixed_div(a, b);
|
||||||
}
|
}
|
||||||
if (wm0.priority_mark.full > priority_mark02.full)
|
if (wm0.priority_mark.full > priority_mark02.full)
|
||||||
priority_mark02.full = wm0.priority_mark.full;
|
priority_mark02.full = wm0.priority_mark.full;
|
||||||
if (rfixed_trunc(priority_mark02) < 0)
|
if (dfixed_trunc(priority_mark02) < 0)
|
||||||
priority_mark02.full = 0;
|
priority_mark02.full = 0;
|
||||||
if (wm0.priority_mark_max.full > priority_mark02.full)
|
if (wm0.priority_mark_max.full > priority_mark02.full)
|
||||||
priority_mark02.full = wm0.priority_mark_max.full;
|
priority_mark02.full = wm0.priority_mark_max.full;
|
||||||
d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
|
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
|
||||||
if (rdev->disp_priority == 2)
|
if (rdev->disp_priority == 2)
|
||||||
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
|
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
|
||||||
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
|
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
|
||||||
|
@ -535,32 +535,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
|
||||||
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
|
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
|
||||||
S_006D4C_D2MODE_PRIORITY_B_OFF(1));
|
S_006D4C_D2MODE_PRIORITY_B_OFF(1));
|
||||||
} else {
|
} else {
|
||||||
if (rfixed_trunc(wm1.dbpp) > 64)
|
if (dfixed_trunc(wm1.dbpp) > 64)
|
||||||
a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
|
a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
|
||||||
else
|
else
|
||||||
a.full = wm1.num_line_pair.full;
|
a.full = wm1.num_line_pair.full;
|
||||||
fill_rate.full = rfixed_div(wm1.sclk, a);
|
fill_rate.full = dfixed_div(wm1.sclk, a);
|
||||||
if (wm1.consumption_rate.full > fill_rate.full) {
|
if (wm1.consumption_rate.full > fill_rate.full) {
|
||||||
b.full = wm1.consumption_rate.full - fill_rate.full;
|
b.full = wm1.consumption_rate.full - fill_rate.full;
|
||||||
b.full = rfixed_mul(b, wm1.active_time);
|
b.full = dfixed_mul(b, wm1.active_time);
|
||||||
a.full = rfixed_mul(wm1.worst_case_latency,
|
a.full = dfixed_mul(wm1.worst_case_latency,
|
||||||
wm1.consumption_rate);
|
wm1.consumption_rate);
|
||||||
a.full = a.full + b.full;
|
a.full = a.full + b.full;
|
||||||
b.full = rfixed_const(16 * 1000);
|
b.full = dfixed_const(16 * 1000);
|
||||||
priority_mark12.full = rfixed_div(a, b);
|
priority_mark12.full = dfixed_div(a, b);
|
||||||
} else {
|
} else {
|
||||||
a.full = rfixed_mul(wm1.worst_case_latency,
|
a.full = dfixed_mul(wm1.worst_case_latency,
|
||||||
wm1.consumption_rate);
|
wm1.consumption_rate);
|
||||||
b.full = rfixed_const(16 * 1000);
|
b.full = dfixed_const(16 * 1000);
|
||||||
priority_mark12.full = rfixed_div(a, b);
|
priority_mark12.full = dfixed_div(a, b);
|
||||||
}
|
}
|
||||||
if (wm1.priority_mark.full > priority_mark12.full)
|
if (wm1.priority_mark.full > priority_mark12.full)
|
||||||
priority_mark12.full = wm1.priority_mark.full;
|
priority_mark12.full = wm1.priority_mark.full;
|
||||||
if (rfixed_trunc(priority_mark12) < 0)
|
if (dfixed_trunc(priority_mark12) < 0)
|
||||||
priority_mark12.full = 0;
|
priority_mark12.full = 0;
|
||||||
if (wm1.priority_mark_max.full > priority_mark12.full)
|
if (wm1.priority_mark_max.full > priority_mark12.full)
|
||||||
priority_mark12.full = wm1.priority_mark_max.full;
|
priority_mark12.full = wm1.priority_mark_max.full;
|
||||||
d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
|
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
|
||||||
if (rdev->disp_priority == 2)
|
if (rdev->disp_priority == 2)
|
||||||
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
|
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
|
||||||
WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
|
WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
|
||||||
|
|
|
@@ -795,20 +795,20 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 		return;
 	}

-	if (crtc->vsc.full > rfixed_const(2))
+	if (crtc->vsc.full > dfixed_const(2))
-		wm->num_line_pair.full = rfixed_const(2);
+		wm->num_line_pair.full = dfixed_const(2);
 	else
-		wm->num_line_pair.full = rfixed_const(1);
+		wm->num_line_pair.full = dfixed_const(1);

-	b.full = rfixed_const(mode->crtc_hdisplay);
+	b.full = dfixed_const(mode->crtc_hdisplay);
-	c.full = rfixed_const(256);
+	c.full = dfixed_const(256);
-	a.full = rfixed_div(b, c);
+	a.full = dfixed_div(b, c);
-	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
-	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
+	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
-	if (a.full < rfixed_const(4)) {
+	if (a.full < dfixed_const(4)) {
 		wm->lb_request_fifo_depth = 4;
 	} else {
-		wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
 	}

 	/* Determine consumption rate
@@ -817,23 +817,23 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 	 * vsc = vertical scaling ratio, defined as source/destination
 	 * hsc = horizontal scaling ration, defined as source/destination
 	 */
-	a.full = rfixed_const(mode->clock);
+	a.full = dfixed_const(mode->clock);
-	b.full = rfixed_const(1000);
+	b.full = dfixed_const(1000);
-	a.full = rfixed_div(a, b);
+	a.full = dfixed_div(a, b);
-	pclk.full = rfixed_div(b, a);
+	pclk.full = dfixed_div(b, a);
 	if (crtc->rmx_type != RMX_OFF) {
-		b.full = rfixed_const(2);
+		b.full = dfixed_const(2);
 		if (crtc->vsc.full > b.full)
 			b.full = crtc->vsc.full;
-		b.full = rfixed_mul(b, crtc->hsc);
+		b.full = dfixed_mul(b, crtc->hsc);
-		c.full = rfixed_const(2);
+		c.full = dfixed_const(2);
-		b.full = rfixed_div(b, c);
+		b.full = dfixed_div(b, c);
-		consumption_time.full = rfixed_div(pclk, b);
+		consumption_time.full = dfixed_div(pclk, b);
 	} else {
 		consumption_time.full = pclk.full;
 	}
-	a.full = rfixed_const(1);
+	a.full = dfixed_const(1);
-	wm->consumption_rate.full = rfixed_div(a, consumption_time);
+	wm->consumption_rate.full = dfixed_div(a, consumption_time);


 	/* Determine line time
@@ -841,27 +841,27 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 	 * LineTime = total number of horizontal pixels
 	 * pclk = pixel clock period(ns)
 	 */
-	a.full = rfixed_const(crtc->base.mode.crtc_htotal);
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
-	line_time.full = rfixed_mul(a, pclk);
+	line_time.full = dfixed_mul(a, pclk);

 	/* Determine active time
 	 * ActiveTime = time of active region of display within one line,
 	 * hactive = total number of horizontal active pixels
 	 * htotal = total number of horizontal pixels
 	 */
-	a.full = rfixed_const(crtc->base.mode.crtc_htotal);
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
-	b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
+	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
-	wm->active_time.full = rfixed_mul(line_time, b);
+	wm->active_time.full = dfixed_mul(line_time, b);
-	wm->active_time.full = rfixed_div(wm->active_time, a);
+	wm->active_time.full = dfixed_div(wm->active_time, a);

 	/* Determine chunk time
 	 * ChunkTime = the time it takes the DCP to send one chunk of data
 	 * to the LB which consists of pipeline delay and inter chunk gap
 	 * sclk = system clock(Mhz)
 	 */
-	a.full = rfixed_const(600 * 1000);
+	a.full = dfixed_const(600 * 1000);
-	chunk_time.full = rfixed_div(a, rdev->pm.sclk);
+	chunk_time.full = dfixed_div(a, rdev->pm.sclk);
-	read_delay_latency.full = rfixed_const(1000);
+	read_delay_latency.full = dfixed_const(1000);

 	/* Determine the worst case latency
 	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -871,9 +871,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
 	 * which consists of pipeline delay and inter chunk gap
 	 */
-	if (rfixed_trunc(wm->num_line_pair) > 1) {
+	if (dfixed_trunc(wm->num_line_pair) > 1) {
-		a.full = rfixed_const(3);
+		a.full = dfixed_const(3);
-		wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
 		wm->worst_case_latency.full += read_delay_latency.full;
 	} else {
 		wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
@@ -889,34 +889,34 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 	 * of data to the LB which consists of
 	 * pipeline delay and inter chunk gap
 	 */
-	if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
 		tolerable_latency.full = line_time.full;
 	} else {
-		tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
 		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
-		tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
 		tolerable_latency.full = line_time.full - tolerable_latency.full;
 	}
 	/* We assume worst case 32bits (4 bytes) */
-	wm->dbpp.full = rfixed_const(2 * 16);
+	wm->dbpp.full = dfixed_const(2 * 16);

 	/* Determine the maximum priority mark
 	 * width = viewport width in pixels
 	 */
-	a.full = rfixed_const(16);
+	a.full = dfixed_const(16);
-	wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
-	wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
-	wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);

 	/* Determine estimated width */
 	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
-	estimated_width.full = rfixed_div(estimated_width, consumption_time);
+	estimated_width.full = dfixed_div(estimated_width, consumption_time);
-	if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
 		wm->priority_mark.full = wm->priority_mark_max.full;
 	} else {
-		a.full = rfixed_const(16);
+		a.full = dfixed_const(16);
-		wm->priority_mark.full = rfixed_div(estimated_width, a);
+		wm->priority_mark.full = dfixed_div(estimated_width, a);
-		wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
 		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
 	}
 }
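The comments in rv515_crtc_bandwidth_compute() spell out the timing relations the fixed-point code implements. As an illustrative sketch only, not part of the patch, with a made-up function name and plain integer parameters in place of the driver's mode structure, the LineTime step looks like this with the renamed helpers; ActiveTime in the hunk then scales this by crtc_hdisplay/crtc_htotal, and the worst-case latency adds read_delay_latency on top of one or three chunk times depending on num_line_pair.

#include <linux/types.h>
#include <drm/drm_fixed.h>

/* Hypothetical example: line period in nanoseconds from a pixel clock
 * given in kHz and the total horizontal pixel count, in 20.12 fixed point.
 */
static u32 example_line_time_ns(u32 clock_khz, u32 htotal)
{
	fixed20_12 a, b, pclk, line_time;

	a.full = dfixed_const(clock_khz);
	b.full = dfixed_const(1000);
	a.full = dfixed_div(a, b);		/* pixel clock in MHz */
	pclk.full = dfixed_div(b, a);		/* pixel period in ns */

	a.full = dfixed_const(htotal);
	line_time.full = dfixed_mul(a, pclk);	/* LineTime = htotal * pclk */

	return dfixed_trunc(line_time);		/* whole nanoseconds */
}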
@@ -945,58 +945,58 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
 	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);

 	if (mode0 && mode1) {
-		if (rfixed_trunc(wm0.dbpp) > 64)
+		if (dfixed_trunc(wm0.dbpp) > 64)
-			a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
 		else
 			a.full = wm0.num_line_pair.full;
-		if (rfixed_trunc(wm1.dbpp) > 64)
+		if (dfixed_trunc(wm1.dbpp) > 64)
-			b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+			b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
 		else
 			b.full = wm1.num_line_pair.full;
 		a.full += b.full;
-		fill_rate.full = rfixed_div(wm0.sclk, a);
+		fill_rate.full = dfixed_div(wm0.sclk, a);
 		if (wm0.consumption_rate.full > fill_rate.full) {
 			b.full = wm0.consumption_rate.full - fill_rate.full;
-			b.full = rfixed_mul(b, wm0.active_time);
+			b.full = dfixed_mul(b, wm0.active_time);
-			a.full = rfixed_const(16);
+			a.full = dfixed_const(16);
-			b.full = rfixed_div(b, a);
+			b.full = dfixed_div(b, a);
-			a.full = rfixed_mul(wm0.worst_case_latency,
+			a.full = dfixed_mul(wm0.worst_case_latency,
 					    wm0.consumption_rate);
 			priority_mark02.full = a.full + b.full;
 		} else {
-			a.full = rfixed_mul(wm0.worst_case_latency,
+			a.full = dfixed_mul(wm0.worst_case_latency,
 					    wm0.consumption_rate);
-			b.full = rfixed_const(16 * 1000);
+			b.full = dfixed_const(16 * 1000);
-			priority_mark02.full = rfixed_div(a, b);
+			priority_mark02.full = dfixed_div(a, b);
 		}
 		if (wm1.consumption_rate.full > fill_rate.full) {
 			b.full = wm1.consumption_rate.full - fill_rate.full;
-			b.full = rfixed_mul(b, wm1.active_time);
+			b.full = dfixed_mul(b, wm1.active_time);
-			a.full = rfixed_const(16);
+			a.full = dfixed_const(16);
-			b.full = rfixed_div(b, a);
+			b.full = dfixed_div(b, a);
-			a.full = rfixed_mul(wm1.worst_case_latency,
+			a.full = dfixed_mul(wm1.worst_case_latency,
 					    wm1.consumption_rate);
 			priority_mark12.full = a.full + b.full;
 		} else {
-			a.full = rfixed_mul(wm1.worst_case_latency,
+			a.full = dfixed_mul(wm1.worst_case_latency,
 					    wm1.consumption_rate);
-			b.full = rfixed_const(16 * 1000);
+			b.full = dfixed_const(16 * 1000);
-			priority_mark12.full = rfixed_div(a, b);
+			priority_mark12.full = dfixed_div(a, b);
 		}
 		if (wm0.priority_mark.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark.full;
-		if (rfixed_trunc(priority_mark02) < 0)
+		if (dfixed_trunc(priority_mark02) < 0)
 			priority_mark02.full = 0;
 		if (wm0.priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark_max.full;
 		if (wm1.priority_mark.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark.full;
-		if (rfixed_trunc(priority_mark12) < 0)
+		if (dfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
 		if (wm1.priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark_max.full;
-		d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
-		d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
 		if (rdev->disp_priority == 2) {
 			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
@@ -1006,32 +1006,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
 		WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
 		WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
 	} else if (mode0) {
-		if (rfixed_trunc(wm0.dbpp) > 64)
+		if (dfixed_trunc(wm0.dbpp) > 64)
-			a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
 		else
 			a.full = wm0.num_line_pair.full;
-		fill_rate.full = rfixed_div(wm0.sclk, a);
+		fill_rate.full = dfixed_div(wm0.sclk, a);
 		if (wm0.consumption_rate.full > fill_rate.full) {
 			b.full = wm0.consumption_rate.full - fill_rate.full;
-			b.full = rfixed_mul(b, wm0.active_time);
+			b.full = dfixed_mul(b, wm0.active_time);
-			a.full = rfixed_const(16);
+			a.full = dfixed_const(16);
-			b.full = rfixed_div(b, a);
+			b.full = dfixed_div(b, a);
-			a.full = rfixed_mul(wm0.worst_case_latency,
+			a.full = dfixed_mul(wm0.worst_case_latency,
 					    wm0.consumption_rate);
 			priority_mark02.full = a.full + b.full;
 		} else {
-			a.full = rfixed_mul(wm0.worst_case_latency,
+			a.full = dfixed_mul(wm0.worst_case_latency,
 					    wm0.consumption_rate);
-			b.full = rfixed_const(16);
+			b.full = dfixed_const(16);
-			priority_mark02.full = rfixed_div(a, b);
+			priority_mark02.full = dfixed_div(a, b);
 		}
 		if (wm0.priority_mark.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark.full;
-		if (rfixed_trunc(priority_mark02) < 0)
+		if (dfixed_trunc(priority_mark02) < 0)
 			priority_mark02.full = 0;
 		if (wm0.priority_mark_max.full > priority_mark02.full)
 			priority_mark02.full = wm0.priority_mark_max.full;
-		d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
 		if (rdev->disp_priority == 2)
 			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 		WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -1039,32 +1039,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
 		WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
 		WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
 	} else {
-		if (rfixed_trunc(wm1.dbpp) > 64)
+		if (dfixed_trunc(wm1.dbpp) > 64)
-			a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+			a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
 		else
 			a.full = wm1.num_line_pair.full;
-		fill_rate.full = rfixed_div(wm1.sclk, a);
+		fill_rate.full = dfixed_div(wm1.sclk, a);
 		if (wm1.consumption_rate.full > fill_rate.full) {
 			b.full = wm1.consumption_rate.full - fill_rate.full;
-			b.full = rfixed_mul(b, wm1.active_time);
+			b.full = dfixed_mul(b, wm1.active_time);
-			a.full = rfixed_const(16);
+			a.full = dfixed_const(16);
-			b.full = rfixed_div(b, a);
+			b.full = dfixed_div(b, a);
-			a.full = rfixed_mul(wm1.worst_case_latency,
+			a.full = dfixed_mul(wm1.worst_case_latency,
 					    wm1.consumption_rate);
 			priority_mark12.full = a.full + b.full;
 		} else {
-			a.full = rfixed_mul(wm1.worst_case_latency,
+			a.full = dfixed_mul(wm1.worst_case_latency,
 					    wm1.consumption_rate);
-			b.full = rfixed_const(16 * 1000);
+			b.full = dfixed_const(16 * 1000);
-			priority_mark12.full = rfixed_div(a, b);
+			priority_mark12.full = dfixed_div(a, b);
 		}
 		if (wm1.priority_mark.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark.full;
-		if (rfixed_trunc(priority_mark12) < 0)
+		if (dfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
 		if (wm1.priority_mark_max.full > priority_mark12.full)
 			priority_mark12.full = wm1.priority_mark_max.full;
-		d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
 		if (rdev->disp_priority == 2)
 			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 		WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
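All three branches of rv515_bandwidth_avivo_update() apply the same per-head formula: compare, roughly, how fast the line buffer can be refilled (fill_rate, derived from sclk and the bits per pixel) against how fast the CRTC consumes it, and pad the latency term with the shortfall when consumption wins. As a hedged sketch only, not code from this patch and with made-up names, the per-watermark mark boils down to:

#include <linux/types.h>
#include <drm/drm_fixed.h>

/* Hypothetical helper, for illustration: compute a priority mark from one
 * watermark's rates, mirroring the pattern in the hunks above.
 */
static fixed20_12 example_priority_mark(fixed20_12 fill_rate,
					fixed20_12 consumption_rate,
					fixed20_12 active_time,
					fixed20_12 worst_case_latency)
{
	fixed20_12 a, b, mark;

	if (consumption_rate.full > fill_rate.full) {
		/* consumption outpaces refill: add the shortfall
		 * accumulated over the active period
		 */
		b.full = consumption_rate.full - fill_rate.full;
		b.full = dfixed_mul(b, active_time);
		a.full = dfixed_const(16);
		b.full = dfixed_div(b, a);
		a.full = dfixed_mul(worst_case_latency, consumption_rate);
		mark.full = a.full + b.full;
	} else {
		a.full = dfixed_mul(worst_case_latency, consumption_rate);
		b.full = dfixed_const(16 * 1000);
		mark.full = dfixed_div(a, b);
	}
	return mark;
}

The final hunk below is the shared header itself, with its include guard moving from RADEON_FIXED_H to DRM_FIXED_H.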
@@ -21,41 +21,41 @@
  *
  * Authors: Dave Airlie
  */
-#ifndef RADEON_FIXED_H
+#ifndef DRM_FIXED_H
-#define RADEON_FIXED_H
+#define DRM_FIXED_H

-typedef union rfixed {
+typedef union dfixed {
 	u32 full;
 } fixed20_12;


-#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */
+#define dfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */
-#define rfixed_const_half(A) (u32)(((A) << 12) + 2048)
+#define dfixed_const_half(A) (u32)(((A) << 12) + 2048)
-#define rfixed_const_666(A) (u32)(((A) << 12) + 2731)
+#define dfixed_const_666(A) (u32)(((A) << 12) + 2731)
-#define rfixed_const_8(A) (u32)(((A) << 12) + 3277)
+#define dfixed_const_8(A) (u32)(((A) << 12) + 3277)
-#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
+#define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
-#define fixed_init(A) { .full = rfixed_const((A)) }
+#define dfixed_init(A) { .full = dfixed_const((A)) }
-#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
+#define dfixed_init_half(A) { .full = dfixed_const_half((A)) }
-#define rfixed_trunc(A) ((A).full >> 12)
+#define dfixed_trunc(A) ((A).full >> 12)

-static inline u32 rfixed_floor(fixed20_12 A)
+static inline u32 dfixed_floor(fixed20_12 A)
 {
-	u32 non_frac = rfixed_trunc(A);
+	u32 non_frac = dfixed_trunc(A);

-	return rfixed_const(non_frac);
+	return dfixed_const(non_frac);
 }

-static inline u32 rfixed_ceil(fixed20_12 A)
+static inline u32 dfixed_ceil(fixed20_12 A)
 {
-	u32 non_frac = rfixed_trunc(A);
+	u32 non_frac = dfixed_trunc(A);

-	if (A.full > rfixed_const(non_frac))
+	if (A.full > dfixed_const(non_frac))
-		return rfixed_const(non_frac + 1);
+		return dfixed_const(non_frac + 1);
 	else
-		return rfixed_const(non_frac);
+		return dfixed_const(non_frac);
 }

-static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
+static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
 {
 	u64 tmp = ((u64)A.full << 13);
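For reference, a minimal usage sketch of the renamed 20.12 helpers; illustrative only, not part of the patch, and the function name is made up:

#include <linux/types.h>
#include <drm/drm_fixed.h>

/* Hypothetical example of basic 20.12 fixed-point arithmetic. */
static u32 dfixed_example(void)
{
	fixed20_12 a, b, c;

	a.full = dfixed_const(3);	/* 3.0 -> 0x3000 */
	b.full = dfixed_const_half(2);	/* 2.5 -> 0x2800 */
	c.full = dfixed_mul(a, b);	/* 7.5 -> 0x7800 */

	/* dfixed_ceil(c) yields the fixed-point encoding of 8, i.e.
	 * dfixed_const(8); dfixed_trunc(c) is the plain integer part.
	 */
	return dfixed_trunc(c);		/* 7 */
}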