USB: xhci: fix lots of compiler warnings.
Turns out someone never built this code on a 64bit platform.

Someone owes me a beer...

Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
This commit is contained in:

parent b7258a4aba
commit 700e2052c6

4 changed files with 172 additions and 181 deletions
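The warnings all come from printing pointers and dma_addr_t values through "0x%x" after a cast to unsigned int, which truncates them to 32 bits on a 64-bit build. The diff below switches pointers to %p and DMA addresses to %llx with an unsigned long long cast. A minimal userspace sketch of the same pattern (the dma_addr_t typedef and the values are stand-ins for illustration, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel type */

int main(void)
{
	int reg = 0;
	void *cap_regs = &reg;			/* stand-in for a register pointer */
	dma_addr_t dma = 0x1ffff0000ULL;	/* stand-in DMA address above 4 GB */

	/*
	 * Truncates the pointer on 64-bit and provokes the kind of compiler
	 * warning this commit fixes:
	 *	printf("regs at 0x%x\n", (unsigned int) cap_regs);
	 */

	/* Portable: %p for pointers, %llx plus a cast for 64-bit wide addresses. */
	printf("regs at %p\n", cap_regs);
	printf("dma = 0x%llx\n", (unsigned long long) dma);
	return 0;
}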
@@ -30,12 +30,11 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
 {
 	u32 temp;
 
-	xhci_dbg(xhci, "// xHCI capability registers at 0x%x:\n",
-			(unsigned int) xhci->cap_regs);
+	xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
+			xhci->cap_regs);
 	temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
-	xhci_dbg(xhci, "// @%x = 0x%x (CAPLENGTH AND HCIVERSION)\n",
-			(unsigned int) &xhci->cap_regs->hc_capbase,
-			(unsigned int) temp);
+	xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
+			&xhci->cap_regs->hc_capbase, temp);
 	xhci_dbg(xhci, "// CAPLENGTH: 0x%x\n",
 			(unsigned int) HC_LENGTH(temp));
 #if 0
@@ -43,29 +42,24 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
 			(unsigned int) HC_VERSION(temp));
 #endif
 
-	xhci_dbg(xhci, "// xHCI operational registers at 0x%x:\n",
-			(unsigned int) xhci->op_regs);
+	xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
 
 	temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
-	xhci_dbg(xhci, "// @%x = 0x%x RTSOFF\n",
-			(unsigned int) &xhci->cap_regs->run_regs_off,
+	xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
+			&xhci->cap_regs->run_regs_off,
 			(unsigned int) temp & RTSOFF_MASK);
-	xhci_dbg(xhci, "// xHCI runtime registers at 0x%x:\n",
-			(unsigned int) xhci->run_regs);
+	xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
 
 	temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
-	xhci_dbg(xhci, "// @%x = 0x%x DBOFF\n",
-			(unsigned int) &xhci->cap_regs->db_off, temp);
-	xhci_dbg(xhci, "// Doorbell array at 0x%x:\n",
-			(unsigned int) xhci->dba);
+	xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
+	xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
 }
 
 void xhci_print_cap_regs(struct xhci_hcd *xhci)
 {
 	u32 temp;
 
-	xhci_dbg(xhci, "xHCI capability registers at 0x%x:\n",
-			(unsigned int) xhci->cap_regs);
+	xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
 
 	temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
 	xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
@@ -146,8 +140,7 @@ void xhci_print_status(struct xhci_hcd *xhci)
 
 void xhci_print_op_regs(struct xhci_hcd *xhci)
 {
-	xhci_dbg(xhci, "xHCI operational registers at 0x%x:\n",
-			(unsigned int) xhci->op_regs);
+	xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
 	xhci_print_command_reg(xhci);
 	xhci_print_status(xhci);
 }
@@ -168,9 +161,8 @@ void xhci_print_ports(struct xhci_hcd *xhci)
 	addr = &xhci->op_regs->port_status_base;
 	for (i = 0; i < ports; i++) {
 		for (j = 0; j < NUM_PORT_REGS; ++j) {
-			xhci_dbg(xhci, "0x%x port %s reg = 0x%x\n",
-					(unsigned int) addr,
-					names[j],
+			xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
+					addr, names[j],
 					(unsigned int) xhci_readl(xhci, addr));
 			addr++;
 		}
@@ -187,46 +179,46 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct intr_reg *ir_set, int set_n
 	if (temp == XHCI_INIT_VALUE)
 		return;
 
-	xhci_dbg(xhci, " 0x%x: ir_set[%i]\n", (unsigned int) ir_set, set_num);
+	xhci_dbg(xhci, " %p: ir_set[%i]\n", ir_set, set_num);
 
-	xhci_dbg(xhci, " 0x%x: ir_set.pending = 0x%x\n",
-			(unsigned int) addr, (unsigned int) temp);
+	xhci_dbg(xhci, " %p: ir_set.pending = 0x%x\n", addr,
+			(unsigned int)temp);
 
 	addr = &ir_set->irq_control;
 	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " 0x%x: ir_set.control = 0x%x\n",
-			(unsigned int) addr, (unsigned int) temp);
+	xhci_dbg(xhci, " %p: ir_set.control = 0x%x\n", addr,
+			(unsigned int)temp);
 
 	addr = &ir_set->erst_size;
 	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " 0x%x: ir_set.erst_size = 0x%x\n",
-			(unsigned int) addr, (unsigned int) temp);
+	xhci_dbg(xhci, " %p: ir_set.erst_size = 0x%x\n", addr,
+			(unsigned int)temp);
 
 	addr = &ir_set->rsvd;
 	temp = xhci_readl(xhci, addr);
 	if (temp != XHCI_INIT_VALUE)
-		xhci_dbg(xhci, "  WARN: 0x%x: ir_set.rsvd = 0x%x\n",
-				(unsigned int) addr, (unsigned int) temp);
+		xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
+				addr, (unsigned int)temp);
 
 	addr = &ir_set->erst_base[0];
 	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " 0x%x: ir_set.erst_base[0] = 0x%x\n",
-			(unsigned int) addr, (unsigned int) temp);
+	xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n",
+			addr, (unsigned int) temp);
 
 	addr = &ir_set->erst_base[1];
 	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " 0x%x: ir_set.erst_base[1] = 0x%x\n",
-			(unsigned int) addr, (unsigned int) temp);
+	xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n",
+			addr, (unsigned int) temp);
 
 	addr = &ir_set->erst_dequeue[0];
 	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " 0x%x: ir_set.erst_dequeue[0] = 0x%x\n",
-			(unsigned int) addr, (unsigned int) temp);
+	xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n",
+			addr, (unsigned int) temp);
 
 	addr = &ir_set->erst_dequeue[1];
 	temp = xhci_readl(xhci, addr);
-	xhci_dbg(xhci, " 0x%x: ir_set.erst_dequeue[1] = 0x%x\n",
-			(unsigned int) addr, (unsigned int) temp);
+	xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n",
+			addr, (unsigned int) temp);
 }
 
 void xhci_print_run_regs(struct xhci_hcd *xhci)
@@ -234,17 +226,16 @@ void xhci_print_run_regs(struct xhci_hcd *xhci)
 	u32 temp;
 	int i;
 
-	xhci_dbg(xhci, "xHCI runtime registers at 0x%x:\n",
-			(unsigned int) xhci->run_regs);
+	xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
 	temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
-	xhci_dbg(xhci, " 0x%x: Microframe index = 0x%x\n",
-			(unsigned int) &xhci->run_regs->microframe_index,
+	xhci_dbg(xhci, " %p: Microframe index = 0x%x\n",
+			&xhci->run_regs->microframe_index,
 			(unsigned int) temp);
 	for (i = 0; i < 7; ++i) {
 		temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
 		if (temp != XHCI_INIT_VALUE)
-			xhci_dbg(xhci, " WARN: 0x%x: Rsvd[%i] = 0x%x\n",
-					(unsigned int) &xhci->run_regs->rsvd[i],
+			xhci_dbg(xhci, " WARN: %p: Rsvd[%i] = 0x%x\n",
+					&xhci->run_regs->rsvd[i],
 					i, (unsigned int) temp);
 	}
 }
@@ -347,14 +338,16 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
 
 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
-	xhci_dbg(xhci, "Ring deq = 0x%x (virt), 0x%x (dma)\n",
-			(unsigned int) ring->dequeue,
-			trb_virt_to_dma(ring->deq_seg, ring->dequeue));
+	xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
+			ring->dequeue,
+			(unsigned long long)trb_virt_to_dma(ring->deq_seg,
+				ring->dequeue));
 	xhci_dbg(xhci, "Ring deq updated %u times\n",
 			ring->deq_updates);
-	xhci_dbg(xhci, "Ring enq = 0x%x (virt), 0x%x (dma)\n",
-			(unsigned int) ring->enqueue,
-			trb_virt_to_dma(ring->enq_seg, ring->enqueue));
+	xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
+			ring->enqueue,
+			(unsigned long long)trb_virt_to_dma(ring->enq_seg,
+				ring->enqueue));
 	xhci_dbg(xhci, "Ring enq updated %u times\n",
 			ring->enq_updates);
 }
@@ -418,42 +411,42 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_ad
 	/* Fields are 32 bits wide, DMA addresses are in bytes */
 	int field_size = 32 / 8;
 
-	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - drop flags\n",
-			(unsigned int) &ctx->drop_flags,
-			dma, ctx->drop_flags);
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+			&ctx->drop_flags, (unsigned long long)dma,
+			ctx->drop_flags);
 	dma += field_size;
-	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - add flags\n",
-			(unsigned int) &ctx->add_flags,
-			dma, ctx->add_flags);
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+			&ctx->add_flags, (unsigned long long)dma,
+			ctx->add_flags);
 	dma += field_size;
 	for (i = 0; i > 6; ++i) {
-		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n",
-				(unsigned int) &ctx->rsvd[i],
-				dma, ctx->rsvd[i], i);
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+				&ctx->rsvd[i], (unsigned long long)dma,
+				ctx->rsvd[i], i);
 		dma += field_size;
 	}
 
 	xhci_dbg(xhci, "Slot Context:\n");
-	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_info\n",
-			(unsigned int) &ctx->slot.dev_info,
-			dma, ctx->slot.dev_info);
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
+			&ctx->slot.dev_info,
+			(unsigned long long)dma, ctx->slot.dev_info);
 	dma += field_size;
-	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_info2\n",
-			(unsigned int) &ctx->slot.dev_info2,
-			dma, ctx->slot.dev_info2);
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
+			&ctx->slot.dev_info2,
+			(unsigned long long)dma, ctx->slot.dev_info2);
 	dma += field_size;
-	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - tt_info\n",
-			(unsigned int) &ctx->slot.tt_info,
-			dma, ctx->slot.tt_info);
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
+			&ctx->slot.tt_info,
+			(unsigned long long)dma, ctx->slot.tt_info);
 	dma += field_size;
-	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_state\n",
-			(unsigned int) &ctx->slot.dev_state,
-			dma, ctx->slot.dev_state);
+	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
+			&ctx->slot.dev_state,
+			(unsigned long long)dma, ctx->slot.dev_state);
 	dma += field_size;
 	for (i = 0; i > 4; ++i) {
-		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n",
-				(unsigned int) &ctx->slot.reserved[i],
-				dma, ctx->slot.reserved[i], i);
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+				&ctx->slot.reserved[i], (unsigned long long)dma,
+				ctx->slot.reserved[i], i);
 		dma += field_size;
 	}
 
@@ -461,30 +454,31 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_ad
 	last_ep_ctx = last_ep + 1;
 	for (i = 0; i < last_ep_ctx; ++i) {
 		xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
-		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - ep_info\n",
-				(unsigned int) &ctx->ep[i].ep_info,
-				dma, ctx->ep[i].ep_info);
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
+				&ctx->ep[i].ep_info,
+				(unsigned long long)dma, ctx->ep[i].ep_info);
 		dma += field_size;
-		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - ep_info2\n",
-				(unsigned int) &ctx->ep[i].ep_info2,
-				dma, ctx->ep[i].ep_info2);
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
+				&ctx->ep[i].ep_info2,
+				(unsigned long long)dma, ctx->ep[i].ep_info2);
 		dma += field_size;
-		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - deq[0]\n",
-				(unsigned int) &ctx->ep[i].deq[0],
-				dma, ctx->ep[i].deq[0]);
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
+				&ctx->ep[i].deq[0],
+				(unsigned long long)dma, ctx->ep[i].deq[0]);
 		dma += field_size;
-		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - deq[1]\n",
-				(unsigned int) &ctx->ep[i].deq[1],
-				dma, ctx->ep[i].deq[1]);
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
+				&ctx->ep[i].deq[1],
+				(unsigned long long)dma, ctx->ep[i].deq[1]);
 		dma += field_size;
-		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - tx_info\n",
-				(unsigned int) &ctx->ep[i].tx_info,
-				dma, ctx->ep[i].tx_info);
+		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
+				&ctx->ep[i].tx_info,
+				(unsigned long long)dma, ctx->ep[i].tx_info);
 		dma += field_size;
 		for (j = 0; j < 3; ++j) {
-			xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n",
-					(unsigned int) &ctx->ep[i].reserved[j],
-					dma, ctx->ep[i].reserved[j], j);
+			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+					&ctx->ep[i].reserved[j],
+					(unsigned long long)dma,
+					ctx->ep[i].reserved[j], j);
			dma += field_size;
 		}
 	}
@@ -397,10 +397,8 @@ int xhci_run(struct usb_hcd *hcd)
 	xhci_writel(xhci, temp, &xhci->op_regs->command);
 
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-	xhci_dbg(xhci, "// Enabling event ring interrupter 0x%x"
-			" by writing 0x%x to irq_pending\n",
-			(unsigned int) xhci->ir_set,
-			(unsigned int) ER_IRQ_ENABLE(temp));
+	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
+			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
 	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
 			&xhci->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, xhci->ir_set, 0);
@@ -431,8 +429,7 @@ int xhci_run(struct usb_hcd *hcd)
 	xhci_writel(xhci, temp, &xhci->op_regs->command);
 	/* Flush PCI posted writes */
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
-	xhci_dbg(xhci, "// @%x = 0x%x\n",
-			(unsigned int) &xhci->op_regs->command, temp);
+	xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
 	if (doorbell)
 		(*doorbell)(xhci);
 
@@ -660,7 +657,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	if (ret || !urb->hcpriv)
 		goto done;
 
-	xhci_dbg(xhci, "Cancel URB 0x%x\n", (unsigned int) urb);
+	xhci_dbg(xhci, "Cancel URB %p\n", urb);
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 	ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
 	td = (struct xhci_td *) urb->hcpriv;
@@ -702,10 +699,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	int ret;
 
 	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
-	xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
 	if (ret <= 0)
 		return ret;
 	xhci = hcd_to_xhci(hcd);
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 
 	drop_flag = xhci_get_endpoint_flag(&ep->desc);
 	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
@@ -730,8 +727,8 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	 */
 	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
 			in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
-		xhci_warn(xhci, "xHCI %s called with disabled ep %#x\n",
-				__func__, (unsigned int) ep);
+		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+				__func__, ep);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return 0;
 	}
@@ -817,8 +814,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	 * ignore this request.
 	 */
 	if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
-		xhci_warn(xhci, "xHCI %s called with enabled ep %#x\n",
-				__func__, (unsigned int) ep);
+		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+				__func__, ep);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return 0;
 	}
@@ -904,7 +901,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return -EINVAL;
 	}
-	xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 	virt_dev = xhci->devs[udev->slot_id];
 
 	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
@@ -1009,7 +1006,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
 	}
-	xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 	virt_dev = xhci->devs[udev->slot_id];
 	/* Free any rings allocated for added endpoints */
 	for (i = 0; i < 31; ++i) {
@@ -1184,16 +1181,16 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
 	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
 	xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
-	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%08x = %#08x\n",
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
 			udev->slot_id,
-			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
+			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
 			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
-	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%08x = %#08x\n",
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
 			udev->slot_id,
-			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
+			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
 			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
-	xhci_dbg(xhci, "Output Context DMA address = %#08x\n",
-			virt_dev->out_ctx_dma);
+	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
+			(unsigned long long)virt_dev->out_ctx_dma);
 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
 	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
@@ -40,16 +40,15 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
 	seg = kzalloc(sizeof *seg, flags);
 	if (!seg)
 		return 0;
-	xhci_dbg(xhci, "Allocating priv segment structure at 0x%x\n",
-			(unsigned int) seg);
+	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
 
 	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
 	if (!seg->trbs) {
 		kfree(seg);
 		return 0;
 	}
-	xhci_dbg(xhci, "// Allocating segment at 0x%x (virtual) 0x%x (DMA)\n",
-			(unsigned int) seg->trbs, (u32) dma);
+	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
+			seg->trbs, (unsigned long long)dma);
 
 	memset(seg->trbs, 0, SEGMENT_SIZE);
 	seg->dma = dma;
@@ -63,14 +62,12 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
 	if (!seg)
 		return;
 	if (seg->trbs) {
-		xhci_dbg(xhci, "Freeing DMA segment at 0x%x"
-				" (virtual) 0x%x (DMA)\n",
-				(unsigned int) seg->trbs, (u32) seg->dma);
+		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
+				seg->trbs, (unsigned long long)seg->dma);
 		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
 		seg->trbs = NULL;
 	}
-	xhci_dbg(xhci, "Freeing priv segment structure at 0x%x\n",
-			(unsigned int) seg);
+	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
 	kfree(seg);
 }
 
@@ -98,8 +95,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		val |= TRB_TYPE(TRB_LINK);
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
 	}
-	xhci_dbg(xhci, "Linking segment 0x%x to segment 0x%x (DMA)\n",
-			prev->dma, next->dma);
+	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
+			(unsigned long long)prev->dma,
+			(unsigned long long)next->dma);
 }
 
 /* XXX: Do we need the hcd structure in all these functions? */
@@ -112,7 +110,7 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 		return;
 	first_seg = ring->first_seg;
 	seg = first_seg->next;
-	xhci_dbg(xhci, "Freeing ring at 0x%x\n", (unsigned int) ring);
+	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
 	while (seg != first_seg) {
 		struct xhci_segment *next = seg->next;
 		xhci_segment_free(xhci, seg);
@@ -137,7 +135,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 	struct xhci_segment *prev;
 
 	ring = kzalloc(sizeof *(ring), flags);
-	xhci_dbg(xhci, "Allocating ring at 0x%x\n", (unsigned int) ring);
+	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
 	if (!ring)
 		return 0;
 
@@ -169,8 +167,8 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		/* See section 4.9.2.1 and 6.4.4.1 */
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
 		xhci_dbg(xhci, "Wrote link toggle flag to"
-				" segment 0x%x (virtual), 0x%x (DMA)\n",
-				(unsigned int) prev, (u32) prev->dma);
+				" segment %p (virtual), 0x%llx (DMA)\n",
+				prev, (unsigned long long)prev->dma);
 	}
 	/* The ring is empty, so the enqueue pointer == dequeue pointer */
 	ring->enqueue = ring->first_seg->trbs;
@@ -242,7 +240,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	if (!dev->out_ctx)
 		goto fail;
 	dev->out_ctx_dma = dma;
-	xhci_dbg(xhci, "Slot %d output ctx = 0x%x (dma)\n", slot_id, dma);
+	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
+			(unsigned long long)dma);
 	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
 
 	/* Allocate the (input) device context for address device command */
@@ -250,7 +249,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	if (!dev->in_ctx)
 		goto fail;
 	dev->in_ctx_dma = dma;
-	xhci_dbg(xhci, "Slot %d input ctx = 0x%x (dma)\n", slot_id, dma);
+	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
+			(unsigned long long)dma);
 	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
 
 	/* Allocate endpoint 0 ring */
@@ -266,10 +266,10 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	 */
 	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
 		(u32) dev->out_ctx_dma + (32);
-	xhci_dbg(xhci, "Set slot id %d dcbaa entry 0x%x to 0x%x\n",
+	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 			slot_id,
-			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*slot_id],
-			dev->out_ctx_dma);
+			&xhci->dcbaa->dev_context_ptrs[2*slot_id],
+			(unsigned long long)dev->out_ctx_dma);
 	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
 
 	return 1;
@@ -339,7 +339,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 		dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
 		dev->in_ctx->slot.tt_info |= udev->ttport << 8;
 	}
-	xhci_dbg(xhci, "udev->tt = 0x%x\n", (unsigned int) udev->tt);
+	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
 	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
 
 	/* Step 4 - ring already allocated */
@@ -643,8 +643,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
 	xhci->dcbaa->dma = dma;
-	xhci_dbg(xhci, "// Device context base array address = 0x%x (DMA), 0x%x (virt)\n",
-			xhci->dcbaa->dma, (unsigned int) xhci->dcbaa);
+	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
 	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
 	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
 
@@ -668,8 +668,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
-	xhci_dbg(xhci, "Allocated command ring at 0x%x\n", (unsigned int) xhci->cmd_ring);
-	xhci_dbg(xhci, "First segment DMA is 0x%x\n", (unsigned int) xhci->cmd_ring->first_seg->dma);
+	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
+	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+			(unsigned long long)xhci->cmd_ring->first_seg->dma);
 
 	/* Set the address in the Command Ring Control register */
 	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
@@ -705,15 +706,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
 	if (!xhci->erst.entries)
 		goto fail;
-	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%x\n", dma);
+	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+			(unsigned long long)dma);
 
 	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
 	xhci->erst.num_entries = ERST_NUM_SEGS;
 	xhci->erst.erst_dma_addr = dma;
-	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = 0x%x, dma addr = 0x%x\n",
+	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
 			xhci->erst.num_entries,
-			(unsigned int) xhci->erst.entries,
-			xhci->erst.erst_dma_addr);
+			xhci->erst.entries,
+			(unsigned long long)xhci->erst.erst_dma_addr);
 
 	/* set ring base address and size for each segment table entry */
 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
@@ -735,8 +737,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
 	/* set the segment table base address */
-	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%x\n",
-			xhci->erst.erst_dma_addr);
+	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+			(unsigned long long)xhci->erst.erst_dma_addr);
 	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
 	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
 	val &= ERST_PTR_MASK;
@@ -74,12 +74,12 @@
 dma_addr_t trb_virt_to_dma(struct xhci_segment *seg,
 		union xhci_trb *trb)
 {
-	unsigned int offset;
+	dma_addr_t offset;
 
 	if (!seg || !trb || (void *) trb < (void *) seg->trbs)
 		return 0;
 	/* offset in bytes, since these are byte-addressable */
-	offset = (unsigned int) trb - (unsigned int) seg->trbs;
+	offset = trb - seg->trbs;
 	/* SEGMENT_SIZE in bytes, trbs are 16-byte aligned */
 	if (offset > SEGMENT_SIZE || (offset % sizeof(*trb)) != 0)
 		return 0;
@@ -145,8 +145,8 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
 			ring->cycle_state = (ring->cycle_state ? 0 : 1);
 			if (!in_interrupt())
-				xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
-						(unsigned int) ring,
+				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+						ring,
 						(unsigned int) ring->cycle_state);
 		}
 		ring->deq_seg = ring->deq_seg->next;
@@ -195,8 +195,8 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 		if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
 			ring->cycle_state = (ring->cycle_state ? 0 : 1);
 			if (!in_interrupt())
-				xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
-						(unsigned int) ring,
+				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+						ring,
 						(unsigned int) ring->cycle_state);
 		}
 	}
@@ -387,12 +387,12 @@ void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			 */
 			cur_trb->generic.field[3] &= ~TRB_CHAIN;
 			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
-			xhci_dbg(xhci, "Address = 0x%x (0x%x dma); "
-					"in seg 0x%x (0x%x dma)\n",
-					(unsigned int) cur_trb,
-					trb_virt_to_dma(cur_seg, cur_trb),
-					(unsigned int) cur_seg,
-					cur_seg->dma);
+			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
+					"in seg %p (0x%llx dma)\n",
+					cur_trb,
+					(unsigned long long)trb_virt_to_dma(cur_seg, cur_trb),
+					cur_seg,
+					(unsigned long long)cur_seg->dma);
 		} else {
 			cur_trb->generic.field[0] = 0;
 			cur_trb->generic.field[1] = 0;
@@ -400,12 +400,12 @@ void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			/* Preserve only the cycle bit of this TRB */
 			cur_trb->generic.field[3] &= TRB_CYCLE;
 			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
-			xhci_dbg(xhci, "Cancel TRB 0x%x (0x%x dma) "
-					"in seg 0x%x (0x%x dma)\n",
-					(unsigned int) cur_trb,
-					trb_virt_to_dma(cur_seg, cur_trb),
-					(unsigned int) cur_seg,
-					cur_seg->dma);
+			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
+					"in seg %p (0x%llx dma)\n",
+					cur_trb,
+					(unsigned long long)trb_virt_to_dma(cur_seg, cur_trb),
+					cur_seg,
+					(unsigned long long)cur_seg->dma);
 		}
 		if (cur_trb == cur_td->last_trb)
 			break;
@@ -456,9 +456,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	 */
 	list_for_each(entry, &ep_ring->cancelled_td_list) {
 		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
-		xhci_dbg(xhci, "Cancelling TD starting at 0x%x, 0x%x (dma).\n",
-				(unsigned int) cur_td->first_trb,
-				trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
+				cur_td->first_trb,
+				(unsigned long long)trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
 		/*
 		 * If we stopped on the TD we need to cancel, then we have to
 		 * move the xHC endpoint ring dequeue pointer past this TD.
@@ -480,12 +480,12 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = 0x%x (0x%x dma), "
-				"new deq ptr = 0x%x (0x%x dma), new cycle = %u\n",
-				(unsigned int) deq_state.new_deq_seg,
-				deq_state.new_deq_seg->dma,
-				(unsigned int) deq_state.new_deq_ptr,
-				trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
+		xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+				"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+				deq_state.new_deq_seg,
+				(unsigned long long)deq_state.new_deq_seg->dma,
+				deq_state.new_deq_ptr,
+				(unsigned long long)trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
 				deq_state.new_cycle_state);
 		queue_set_tr_deq(xhci, slot_id, ep_index,
 				deq_state.new_deq_seg,
@@ -522,8 +522,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 		cur_td->urb->hcpriv = NULL;
 		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
 
-		xhci_dbg(xhci, "Giveback cancelled URB 0x%x\n",
-				(unsigned int) cur_td->urb);
+		xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
 		spin_unlock(&xhci->lock);
 		/* Doesn't matter what we pass for status, since the core will
 		 * just overwrite it (because the URB has been unlinked).
@@ -1183,9 +1182,9 @@ unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 			num_trbs++;
 			running_total += TRB_MAX_BUFF_SIZE;
 		}
-		xhci_dbg(xhci, " sg #%d: dma = %#x, len = %#x (%d), num_trbs = %d\n",
-				i, sg_dma_address(sg), len, len,
-				num_trbs - previous_total_trbs);
+		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
+				i, (unsigned long long)sg_dma_address(sg),
+				len, len, num_trbs - previous_total_trbs);
 
 		len = min_t(int, len, temp);
 		temp -= len;
@@ -1394,11 +1393,11 @@ int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
 
 	if (!in_interrupt())
-		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#x, num_trbs = %d\n",
+		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
 				urb->ep->desc.bEndpointAddress,
 				urb->transfer_buffer_length,
 				urb->transfer_buffer_length,
-				urb->transfer_dma,
+				(unsigned long long)urb->transfer_dma,
 				num_trbs);
 
 	ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
@@ -1640,9 +1639,8 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 	addr = trb_virt_to_dma(deq_seg, deq_ptr);
 	if (addr == 0)
 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
-		xhci_warn(xhci, "WARN deq seg = 0x%x, deq pt = 0x%x\n",
-				(unsigned int) deq_seg,
-				(unsigned int) deq_ptr);
+		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
+				deq_seg, deq_ptr);
 	return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
 			trb_slot_id | trb_ep_index | type);
 }