[IA64-SGI] volatile semantics in places where it seems necessary
Resend using accessors instead of volatile qualifiers per hch comments, and
easier-to-understand convenience macros per rja comments.

Patch to apply volatile semantics when accessing MMRs in various SN files.

Signed-off-by: Mark Maule <maule@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 4706df3d3c
commit 5fbcf9a5c6
5 changed files with 76 additions and 65 deletions
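For orientation before the diff: the patch drops volatile qualifiers on MMR pointers and instead routes every register access through explicit accessors (readq/writeq plus two small set/clear helpers added in io.h). Below is a minimal, compilable sketch of the before/after shape; the readq_stub/writeq_stub helpers, the fake_mmr variable and the CTRL_ENABLE bit are stand-ins for illustration only, not the kernel's real MMIO API on __iomem pointers.

#include <stdint.h>
#include <stdio.h>

#define CTRL_ENABLE (1ULL << 3)	/* made-up bit, for illustration only */

/* stand-ins for the kernel's readq()/writeq() MMIO accessors */
static inline uint64_t readq_stub(const uint64_t *addr)
{
	return *(volatile const uint64_t *)addr;
}
static inline void writeq_stub(uint64_t val, uint64_t *addr)
{
	*(volatile uint64_t *)addr = val;
}

int main(void)
{
	uint64_t fake_mmr = 0x10;	/* pretend control register */

	/* old style (what the patch removes): rely on a volatile-qualified
	 * pointer and ordinary C operators:
	 *	volatile uint64_t *reg = &fake_mmr;
	 *	*reg |= CTRL_ENABLE;
	 */

	/* new style: explicit accessor calls for both the read and the write */
	writeq_stub(readq_stub(&fake_mmr) | CTRL_ENABLE, &fake_mmr);

	printf("register now 0x%llx\n", (unsigned long long)fake_mmr);
	return 0;
}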
@@ -29,10 +29,10 @@ void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_control &= ~bits;
+			__sn_clrq_relaxed(&ptr->tio.cp_control, bits);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_wid_control &= ~bits;
+			__sn_clrq_relaxed(&ptr->pic.p_wid_control, bits);
 			break;
 		default:
 			panic
@@ -49,10 +49,10 @@ void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_control |= bits;
+			__sn_setq_relaxed(&ptr->tio.cp_control, bits);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_wid_control |= bits;
+			__sn_setq_relaxed(&ptr->pic.p_wid_control, bits);
 			break;
 		default:
 			panic
@@ -73,10 +73,10 @@ uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ret = ptr->tio.cp_tflush;
+			ret = __sn_readq_relaxed(&ptr->tio.cp_tflush);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ret = ptr->pic.p_wid_tflush;
+			ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush);
 			break;
 		default:
 			panic
@@ -103,10 +103,10 @@ uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ret = ptr->tio.cp_int_status;
+			ret = __sn_readq_relaxed(&ptr->tio.cp_int_status);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ret = ptr->pic.p_int_status;
+			ret = __sn_readq_relaxed(&ptr->pic.p_int_status);
 			break;
 		default:
 			panic
@@ -127,10 +127,10 @@ void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_int_enable &= ~bits;
+			__sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_int_enable &= ~bits;
+			__sn_clrq_relaxed(&ptr->pic.p_int_enable, ~bits);
 			break;
 		default:
 			panic
@@ -147,10 +147,10 @@ void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_int_enable |= bits;
+			__sn_setq_relaxed(&ptr->tio.cp_int_enable, bits);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_int_enable |= bits;
+			__sn_setq_relaxed(&ptr->pic.p_int_enable, bits);
 			break;
 		default:
 			panic
@@ -171,14 +171,16 @@ void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_int_addr[int_n] &= ~TIOCP_HOST_INTR_ADDR;
-			ptr->tio.cp_int_addr[int_n] |=
-			    (addr & TIOCP_HOST_INTR_ADDR);
+			__sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n],
+			    TIOCP_HOST_INTR_ADDR);
+			__sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n],
+			    (addr & TIOCP_HOST_INTR_ADDR));
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_int_addr[int_n] &= ~PIC_HOST_INTR_ADDR;
-			ptr->pic.p_int_addr[int_n] |=
-			    (addr & PIC_HOST_INTR_ADDR);
+			__sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n],
+			    PIC_HOST_INTR_ADDR);
+			__sn_setq_relaxed(&ptr->pic.p_int_addr[int_n],
+			    (addr & PIC_HOST_INTR_ADDR));
 			break;
 		default:
 			panic
@@ -198,10 +200,10 @@ void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_force_pin[int_n] = 1;
+			writeq(1, &ptr->tio.cp_force_pin[int_n]);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_force_pin[int_n] = 1;
+			writeq(1, &ptr->pic.p_force_pin[int_n]);
 			break;
 		default:
 			panic
@@ -222,10 +224,12 @@ uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ret = ptr->tio.cp_wr_req_buf[device];
+			ret =
+			    __sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ret = ptr->pic.p_wr_req_buf[device];
+			ret =
+			    __sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]);
 			break;
 		default:
 			panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", (void *)ptr);
@@ -244,10 +248,10 @@ void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_int_ate_ram[ate_index] = (uint64_t) val;
+			writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_int_ate_ram[ate_index] = (uint64_t) val;
+			writeq(val, &ptr->pic.p_int_ate_ram[ate_index]);
 			break;
 		default:
 			panic
@@ -265,12 +269,10 @@ uint64_t *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ret =
-			    (uint64_t *) & (ptr->tio.cp_int_ate_ram[ate_index]);
+			ret = &ptr->tio.cp_int_ate_ram[ate_index];
			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ret =
-			    (uint64_t *) & (ptr->pic.p_int_ate_ram[ate_index]);
+			ret = &ptr->pic.p_int_ate_ram[ate_index];
 			break;
 		default:
 			panic
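The hunks above all follow the same shape: register reads go through __sn_readq_relaxed(), plain stores go through writeq(), and bit twiddling goes through the new set/clear macros. Below is a compilable sketch of the read path only, simplified to a single register; fake_bridge, BRIDGE_* and readq_relaxed_stub are stand-ins (the real code reads distinct TIOCP/PIC registers through __iomem pointers).

#include <stdint.h>
#include <stdio.h>

enum bridge_type { BRIDGE_TIOCP, BRIDGE_PIC };	/* stand-ins for PCIBR_BRIDGETYPE_* */

struct fake_bridge {				/* stand-in for the TIOCP/PIC register unions */
	enum bridge_type type;
	uint64_t int_status;
};

/* stand-in for __sn_readq_relaxed(); the real one reads an __iomem MMR */
static inline uint64_t readq_relaxed_stub(const uint64_t *addr)
{
	return *(volatile const uint64_t *)addr;
}

/* mirrors the post-patch shape of pcireg_intr_status_get(), with the two
 * bridge cases collapsed into one for brevity */
static uint64_t intr_status_get(struct fake_bridge *ptr)
{
	uint64_t ret = 0;

	switch (ptr->type) {
	case BRIDGE_TIOCP:
	case BRIDGE_PIC:
		ret = readq_relaxed_stub(&ptr->int_status);
		break;
	}
	return ret;
}

int main(void)
{
	struct fake_bridge b = { BRIDGE_PIC, 0x8000000000000001ULL };

	printf("int_status=0x%llx\n", (unsigned long long)intr_status_get(&b));
	return 0;
}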
@@ -37,7 +37,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
 	uint64_t offset;
 	struct page *tmp;
 	struct tioca_common *tioca_common;
-	volatile struct tioca *ca_base;
+	struct tioca *ca_base;
 
 	tioca_common = tioca_kern->ca_common;
 	ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
@@ -174,27 +174,29 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
 	 * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
 	 */
 
-	ca_base->ca_control1 |= CA_AGPDMA_OP_ENB_COMBDELAY;	/* PV895469 ? */
-	ca_base->ca_control2 &= ~(CA_GART_MEM_PARAM);
-	ca_base->ca_control2 |= (0x2ull << CA_GART_MEM_PARAM_SHFT);
+	__sn_setq_relaxed(&ca_base->ca_control1,
+			CA_AGPDMA_OP_ENB_COMBDELAY);	/* PV895469 ? */
+	__sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
+	__sn_setq_relaxed(&ca_base->ca_control2,
+			(0x2ull << CA_GART_MEM_PARAM_SHFT));
 	tioca_kern->ca_gart_iscoherent = 1;
-	ca_base->ca_control2 &=
-	    ~(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB);
+	__sn_clrq_relaxed(&ca_base->ca_control2,
+			(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB));
 
 	/*
 	 * Unmask GART fetch error interrupts.  Clear residual errors first.
 	 */
 
-	ca_base->ca_int_status_alias = CA_GART_FETCH_ERR;
-	ca_base->ca_mult_error_alias = CA_GART_FETCH_ERR;
-	ca_base->ca_int_mask &= ~CA_GART_FETCH_ERR;
+	writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias);
+	writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias);
+	__sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR);
 
 	/*
 	 * Program the aperature and gart registers in TIOCA
 	 */
 
-	ca_base->ca_gart_aperature = ap_reg;
-	ca_base->ca_gart_ptr_table = tioca_kern->ca_gart_coretalk_addr | 1;
+	writeq(ap_reg, &ca_base->ca_gart_aperature);
+	writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table);
 
 	return 0;
 }
@@ -211,7 +213,6 @@ void
 tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
 {
 	int cap_ptr;
-	uint64_t ca_control1;
 	uint32_t reg;
 	struct tioca *tioca_base;
 	struct pci_dev *pdev;
@@ -256,9 +257,7 @@ tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
 	 */
 
 	tioca_base = (struct tioca *)common->ca_common.bs_base;
-	ca_control1 = tioca_base->ca_control1;
-	ca_control1 |= CA_AGP_FW_ENABLE;
-	tioca_base->ca_control1 = ca_control1;
+	__sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE);
 }
 
 EXPORT_SYMBOL(tioca_fastwrite_enable);	/* used by agp-sgi */
@@ -345,7 +344,7 @@ tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
 		return 0;
 	}
 
-	agp_dma_extn = ca_base->ca_agp_dma_addr_extn;
+	agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
 	if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
 		printk(KERN_ERR "%s: coretalk upper node (%u) "
 		       "mismatch with ca_agp_dma_addr_extn (%lu)\n",
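A pattern worth noting in tioca_gart_init() above: where the code knows the entire new register value (the error-alias writes, the aperture and pointer-table setup), the conversion is a plain writeq(); where only some bits change (control1/control2), it uses the read-modify-write macros. A compilable sketch of the two cases, with stub accessors and made-up register/bit names standing in for the TIOCA ones:

#include <stdint.h>
#include <stdio.h>

#define ENB_COMBDELAY	(1ULL << 4)	/* stand-in for CA_AGPDMA_OP_ENB_COMBDELAY */
#define MEM_PARAM_MASK	(3ULL << 8)	/* stand-in for CA_GART_MEM_PARAM */

static inline void wrq(uint64_t v, uint64_t *a) { *(volatile uint64_t *)a = v; }
static inline uint64_t rdq(const uint64_t *a) { return *(volatile const uint64_t *)a; }
#define setq_relaxed(a, v) wrq((rdq(a) | (v)), (a))
#define clrq_relaxed(a, v) wrq((rdq(a) & ~(v)), (a))

int main(void)
{
	uint64_t control1 = 0x1, gart_aperture = 0;	/* pretend TIOCA registers */
	uint64_t ap_reg = 0xc0000000ULL;		/* made-up aperture value */

	/* only some bits change: read-modify-write through the macros */
	setq_relaxed(&control1, ENB_COMBDELAY);
	clrq_relaxed(&control1, MEM_PARAM_MASK);

	/* full value known: a single plain write is enough, no read needed */
	wrq(ap_reg, &gart_aperture);

	printf("control1=0x%llx aperture=0x%llx\n",
	       (unsigned long long)control1, (unsigned long long)gart_aperture);
	return 0;
}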
@@ -227,7 +227,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 
 		ate = ATE_MAKE(addr, pagesize);
 		ate_shadow[i + j] = ate;
-		ate_reg[i + j] = ate;
+		writeq(ate, &ate_reg[i + j]);
 		addr += pagesize;
 	}
 
@@ -268,10 +268,10 @@ tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr)
 	pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);
 
 	if (ce_kern->ce_port[port].dirmap_refcnt == 0) {
-		volatile uint64_t tmp;
+		uint64_t tmp;
 
 		ce_kern->ce_port[port].dirmap_shadow = ct_upper;
-		ce_mmr->ce_ure_dir_map[port] = ct_upper;
+		writeq(ct_upper, &ce_mmr->ce_ure_dir_map[port]);
 		tmp = ce_mmr->ce_ure_dir_map[port];
 		dma_ok = 1;
 	} else
@@ -343,7 +343,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
 	if (TIOCE_D32_ADDR(bus_addr)) {
 		if (--ce_kern->ce_port[port].dirmap_refcnt == 0) {
 			ce_kern->ce_port[port].dirmap_shadow = 0;
-			ce_mmr->ce_ure_dir_map[port] = 0;
+			writeq(0, &ce_mmr->ce_ure_dir_map[port]);
 		}
 	} else {
 		struct tioce_dmamap *map;
@@ -582,18 +582,18 @@ tioce_kern_init(struct tioce_common *tioce_common)
 	 */
 
 	tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base;
-	tioce_mmr->ce_ure_page_map &= ~CE_URE_PAGESIZE_MASK;
-	tioce_mmr->ce_ure_page_map |= CE_URE_256K_PAGESIZE;
+	__sn_clrq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_PAGESIZE_MASK);
+	__sn_setq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_256K_PAGESIZE);
 	tioce_kern->ce_ate3240_pagesize = KB(256);
 
 	for (i = 0; i < TIOCE_NUM_M40_ATES; i++) {
 		tioce_kern->ce_ate40_shadow[i] = 0;
-		tioce_mmr->ce_ure_ate40[i] = 0;
+		writeq(0, &tioce_mmr->ce_ure_ate40[i]);
 	}
 
 	for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) {
 		tioce_kern->ce_ate3240_shadow[i] = 0;
-		tioce_mmr->ce_ure_ate3240[i] = 0;
+		writeq(0, &tioce_mmr->ce_ure_ate3240[i]);
 	}
 
 	return tioce_kern;
@@ -665,7 +665,7 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
 	default:
 		return;
 	}
-	ce_mmr->ce_adm_force_int = force_int_val;
+	writeq(force_int_val, &ce_mmr->ce_adm_force_int);
 }
 
 /**
@@ -686,6 +686,7 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
 	struct tioce_common *ce_common;
 	struct tioce *ce_mmr;
 	int bit;
+	uint64_t vector;
 
 	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
 	if (!pcidev_info)
@@ -696,11 +697,11 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
 
 	bit = sn_irq_info->irq_int_bit;
 
-	ce_mmr->ce_adm_int_mask |= (1UL << bit);
-	ce_mmr->ce_adm_int_dest[bit] =
-		((uint64_t)sn_irq_info->irq_irq << INTR_VECTOR_SHFT) |
-		sn_irq_info->irq_xtalkaddr;
-	ce_mmr->ce_adm_int_mask &= ~(1UL << bit);
+	__sn_setq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
+	vector = (uint64_t)sn_irq_info->irq_irq << INTR_VECTOR_SHFT;
+	vector |= sn_irq_info->irq_xtalkaddr;
+	writeq(vector, &ce_mmr->ce_adm_int_dest[bit]);
+	__sn_clrq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
 
 	tioce_force_interrupt(sn_irq_info);
 }
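In tioce_target_interrupt() above, the old code built the interrupt destination value directly inside the register assignment; the new code masks the interrupt bit, composes the value in a local vector, issues a single writeq(), then unmasks. Below is a compilable sketch of that ordering; the register variables, the shift value and the irq/xtalkaddr numbers are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define VECTOR_SHFT 56ULL	/* stand-in for INTR_VECTOR_SHFT */

static inline void wrq(uint64_t v, uint64_t *a) { *(volatile uint64_t *)a = v; }
static inline uint64_t rdq(const uint64_t *a) { return *(volatile const uint64_t *)a; }
#define setq_relaxed(a, v) wrq((rdq(a) | (v)), (a))
#define clrq_relaxed(a, v) wrq((rdq(a) & ~(v)), (a))

int main(void)
{
	uint64_t int_mask = 0, int_dest = 0;	/* pretend ce_adm_* registers */
	int bit = 5;
	uint64_t irq = 0x3f, xtalkaddr = 0x1000;
	uint64_t vector;

	setq_relaxed(&int_mask, 1UL << bit);	/* mask while retargeting */
	vector = irq << VECTOR_SHFT;		/* compose the value locally */
	vector |= xtalkaddr;
	wrq(vector, &int_dest);			/* one store of the finished value */
	clrq_relaxed(&int_mask, 1UL << bit);	/* unmask */

	printf("dest=0x%llx mask=0x%llx\n",
	       (unsigned long long)int_dest, (unsigned long long)int_mask);
	return 0;
}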
@@ -35,6 +35,15 @@ extern void sn_dma_flush(unsigned long);
 #define __sn_readl_relaxed ___sn_readl_relaxed
 #define __sn_readq_relaxed ___sn_readq_relaxed
 
+/*
+ * Convenience macros for setting/clearing bits using the above accessors
+ */
+
+#define __sn_setq_relaxed(addr, val) \
+	writeq((__sn_readq_relaxed(addr) | (val)), (addr))
+#define __sn_clrq_relaxed(addr, val) \
+	writeq((__sn_readq_relaxed(addr) & ~(val)), (addr))
+
 /*
  * The following routines are SN Platform specific, called when
  * a reference is made to inX/outX set macros.  SN Platform
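The two macros added above expand to a read through __sn_readq_relaxed() followed by a writeq() of the modified value, i.e. a plain (non-atomic, non-locked) read-modify-write on the MMR. A small compilable sketch of that expansion, with stub accessors renamed to make clear they are stand-ins for the SN ones:

#include <stdint.h>
#include <assert.h>

/* stubs standing in for writeq()/__sn_readq_relaxed() on an __iomem pointer */
static inline void writeq_stub(uint64_t val, uint64_t *addr)
{
	*(volatile uint64_t *)addr = val;
}
static inline uint64_t readq_relaxed_stub(const uint64_t *addr)
{
	return *(volatile const uint64_t *)addr;
}

/* same shape as the macros added to io.h, renamed to mark them as stand-ins */
#define setq_relaxed_stub(addr, val) \
	writeq_stub((readq_relaxed_stub(addr) | (val)), (addr))
#define clrq_relaxed_stub(addr, val) \
	writeq_stub((readq_relaxed_stub(addr) & ~(val)), (addr))

int main(void)
{
	uint64_t reg = 0xf0;

	setq_relaxed_stub(&reg, 0x0f);	/* reg becomes 0xff */
	clrq_relaxed_stub(&reg, 0xf0);	/* reg becomes 0x0f */
	assert(reg == 0x0f);
	return 0;
}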
@@ -182,11 +182,11 @@ tioca_tlbflush(struct tioca_kernel *tioca_kernel)
 		 * touch every CL aligned GART entry.
 		 */
 
-		ca_base->ca_control2 &= ~(CA_GART_MEM_PARAM);
-		ca_base->ca_control2 |= CA_GART_FLUSH_TLB;
-		ca_base->ca_control2 |=
-			(0x2ull << CA_GART_MEM_PARAM_SHFT);
-		tmp = ca_base->ca_control2;
+		__sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
+		__sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB);
+		__sn_setq_relaxed(&ca_base->ca_control2,
+			(0x2ull << CA_GART_MEM_PARAM_SHFT));
+		tmp = __sn_readq_relaxed(&ca_base->ca_control2);
 	}
 
 	return;
@@ -196,8 +196,8 @@ tioca_tlbflush(struct tioca_kernel *tioca_kernel)
 	 * Gart in uncached mode ... need an explicit flush.
 	 */
 
-	ca_base->ca_control2 |= CA_GART_FLUSH_TLB;
-	tmp = ca_base->ca_control2;
+	__sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB);
+	tmp = __sn_readq_relaxed(&ca_base->ca_control2);
 }
 
 extern uint32_t tioca_gart_found;
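Both tioca_tlbflush() paths above end with a throwaway read of ca_control2 right after the writes; reading a register back after a write is a common way to make sure posted MMIO writes have reached the device before continuing, and with the read now going through __sn_readq_relaxed() the old volatile tmp is no longer needed to keep the compiler from dropping it. A compilable sketch of the write-then-read-back shape, using stub accessors and a made-up bit name:

#include <stdint.h>
#include <stdio.h>

#define GART_FLUSH_TLB (1ULL << 1)	/* stand-in for CA_GART_FLUSH_TLB */

static inline void wrq(uint64_t v, uint64_t *a) { *(volatile uint64_t *)a = v; }
static inline uint64_t rdq(const uint64_t *a) { return *(volatile const uint64_t *)a; }
#define setq_relaxed(a, v) wrq((rdq(a) | (v)), (a))

int main(void)
{
	uint64_t control2 = 0;	/* pretend ca_control2 */
	uint64_t tmp;

	setq_relaxed(&control2, GART_FLUSH_TLB);	/* kick off the flush */
	tmp = rdq(&control2);				/* read back so the write is not left posted */
	(void)tmp;

	printf("control2=0x%llx\n", (unsigned long long)control2);
	return 0;
}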