Merge commit 'v3.1-rc9' into sched/core

Merge reason: pick up latest fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar 2011-10-06 12:43:28 +02:00
commit 9243a169ac
31 changed files with 236 additions and 285 deletions

View file

@@ -1042,7 +1042,7 @@ conf/interface/*:
 	The functional behaviour for certain settings is different
 	depending on whether local forwarding is enabled or not.
 
-accept_ra - BOOLEAN
+accept_ra - INTEGER
 	Accept Router Advertisements; autoconfigure using them.
 
 	Possible values are:
@@ -1106,7 +1106,7 @@ dad_transmits - INTEGER
 	The amount of Duplicate Address Detection probes to send.
 	Default: 1
 
-forwarding - BOOLEAN
+forwarding - INTEGER
 	Configure interface-specific Host/Router behaviour.
 
 	Note: It is recommended to have the same setting on all
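The type change is the whole point of these two hunks: both fields take more than two values, so documenting them as BOOLEAN was misleading. For accept_ra, the "Possible values are:" list that follows in the same file reads (quoted for context):

	0 Do not accept Router Advertisements.
	1 Accept Router Advertisements if forwarding is disabled.
	2 Overrule forwarding behaviour.
	  Accept Router Advertisements even if forwarding is enabled.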

View file

@@ -6374,7 +6374,6 @@ S:	Supported
 F:	arch/arm/mach-tegra
 
 TEHUTI ETHERNET DRIVER
-M:	Alexander Indenbaum <baum@tehutinetworks.net>
 M:	Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 S:	Supported

View file

@ -1,7 +1,7 @@
VERSION = 3 VERSION = 3
PATCHLEVEL = 1 PATCHLEVEL = 1
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc8 EXTRAVERSION = -rc9
NAME = "Divemaster Edition" NAME = "Divemaster Edition"
# *DOCUMENTATION* # *DOCUMENTATION*

View file

@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
     u8 msg[20];
     int msg_bytes = send_bytes + 4;
     u8 ack;
+    unsigned retry;
 
     if (send_bytes > 16)
         return -1;
@@ -125,20 +126,20 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
     msg[3] = (msg_bytes << 4) | (send_bytes - 1);
     memcpy(&msg[4], send, send_bytes);
 
-    while (1) {
+    for (retry = 0; retry < 4; retry++) {
         ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
                         msg, msg_bytes, NULL, 0, delay, &ack);
         if (ret < 0)
             return ret;
         if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
-            break;
+            return send_bytes;
         else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
             udelay(400);
         else
             return -EIO;
     }
 
-    return send_bytes;
+    return -EIO;
 }
 
 static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
@@ -149,26 +150,29 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
     int msg_bytes = 4;
     u8 ack;
     int ret;
+    unsigned retry;
 
     msg[0] = address;
     msg[1] = address >> 8;
     msg[2] = AUX_NATIVE_READ << 4;
     msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
 
-    while (1) {
+    for (retry = 0; retry < 4; retry++) {
         ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
                         msg, msg_bytes, recv, recv_bytes, delay, &ack);
-        if (ret == 0)
-            return -EPROTO;
         if (ret < 0)
             return ret;
         if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
             return ret;
         else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
             udelay(400);
+        else if (ret == 0)
+            return -EPROTO;
         else
             return -EIO;
     }
+
+    return -EIO;
 }
 
 static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
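The substance of both hunks is the same: an unbounded while (1) retry loop on the DP AUX channel becomes a bounded loop that gives up with -EIO after four attempts, so a sink that keeps deferring or NAKing can no longer hang the kernel. The shape of the fix, as a standalone sketch (helper and reply names hypothetical):

    #define AUX_RETRIES 4

    static int aux_transact(struct aux_chan *chan, const u8 *msg, int len)
    {
        unsigned retry;
        int ret;
        u8 ack;

        for (retry = 0; retry < AUX_RETRIES; retry++) {
            ret = hw_aux_transfer(chan, msg, len, &ack);  /* hypothetical */
            if (ret < 0)
                return ret;        /* hard channel error */
            if (ack == REPLY_ACK)
                return len;        /* success */
            if (ack == REPLY_DEFER)
                udelay(400);       /* sink busy: wait, then retry */
            else
                return -EIO;       /* NAK or malformed reply */
        }
        return -EIO;               /* retries exhausted */
    }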

View file

@@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
     return backend_map;
 }
 
-static void evergreen_program_channel_remap(struct radeon_device *rdev)
-{
-    u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
-
-    tmp = RREG32(MC_SHARED_CHMAP);
-    switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
-    case 0:
-    case 1:
-    case 2:
-    case 3:
-    default:
-        /* default mapping */
-        mc_shared_chremap = 0x00fac688;
-        break;
-    }
-
-    switch (rdev->family) {
-    case CHIP_HEMLOCK:
-    case CHIP_CYPRESS:
-    case CHIP_BARTS:
-        tcp_chan_steer_lo = 0x54763210;
-        tcp_chan_steer_hi = 0x0000ba98;
-        break;
-    case CHIP_JUNIPER:
-    case CHIP_REDWOOD:
-    case CHIP_CEDAR:
-    case CHIP_PALM:
-    case CHIP_SUMO:
-    case CHIP_SUMO2:
-    case CHIP_TURKS:
-    case CHIP_CAICOS:
-    default:
-        tcp_chan_steer_lo = 0x76543210;
-        tcp_chan_steer_hi = 0x0000ba98;
-        break;
-    }
-
-    WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
-    WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
-    WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
     u32 cc_rb_backend_disable = 0;
@@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
     WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
     WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-    evergreen_program_channel_remap(rdev);
-
     num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
     grbm_gfx_index = INSTANCE_BROADCAST_WRITES;

View file

@@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
     return backend_map;
 }
 
-static void cayman_program_channel_remap(struct radeon_device *rdev)
-{
-    u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
-
-    tmp = RREG32(MC_SHARED_CHMAP);
-    switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
-    case 0:
-    case 1:
-    case 2:
-    case 3:
-    default:
-        /* default mapping */
-        mc_shared_chremap = 0x00fac688;
-        break;
-    }
-
-    switch (rdev->family) {
-    case CHIP_CAYMAN:
-    default:
-        //tcp_chan_steer_lo = 0x54763210
-        tcp_chan_steer_lo = 0x76543210;
-        tcp_chan_steer_hi = 0x0000ba98;
-        break;
-    }
-
-    WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
-    WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
-    WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
 static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
                         u32 disable_mask_per_se,
                         u32 max_disable_mask_per_se,
@@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
     WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
     WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
-    cayman_program_channel_remap(rdev);
-
     /* primary versions */
     WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
     WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);

View file

@@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
     if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
         int saved_dpms = connector->dpms;
 
-        if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
-            radeon_dp_needs_link_train(radeon_connector))
-            drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-        else
+        /* Only turn off the display if it's physically disconnected */
+        if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
             drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+        else if (radeon_dp_needs_link_train(radeon_connector))
+            drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
         connector->dpms = saved_dpms;
     }
 }

View file

@@ -208,24 +208,26 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
     int xorigin = 0, yorigin = 0;
     int w = radeon_crtc->cursor_width;
 
-    if (x < 0)
-        xorigin = -x + 1;
-    if (y < 0)
-        yorigin = -y + 1;
-    if (xorigin >= CURSOR_WIDTH)
-        xorigin = CURSOR_WIDTH - 1;
-    if (yorigin >= CURSOR_HEIGHT)
-        yorigin = CURSOR_HEIGHT - 1;
+    if (ASIC_IS_AVIVO(rdev)) {
+        /* avivo cursor are offset into the total surface */
+        x += crtc->x;
+        y += crtc->y;
+    }
+    DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+    if (x < 0) {
+        xorigin = min(-x, CURSOR_WIDTH - 1);
+        x = 0;
+    }
+    if (y < 0) {
+        yorigin = min(-y, CURSOR_HEIGHT - 1);
+        y = 0;
+    }
 
     if (ASIC_IS_AVIVO(rdev)) {
         int i = 0;
         struct drm_crtc *crtc_p;
 
-        /* avivo cursor are offset into the total surface */
-        x += crtc->x;
-        y += crtc->y;
-        DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
         /* avivo cursor image can't end on 128 pixel boundary or
          * go past the end of the frame if both crtcs are enabled
          */
@@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
     radeon_lock_cursor(crtc, true);
     if (ASIC_IS_DCE4(rdev)) {
-        WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
-               ((xorigin ? 0 : x) << 16) |
-               (yorigin ? 0 : y));
+        WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
         WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
         WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
                ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
     } else if (ASIC_IS_AVIVO(rdev)) {
-        WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
-               ((xorigin ? 0 : x) << 16) |
-               (yorigin ? 0 : y));
+        WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
         WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
         WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
                ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
@@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
              | yorigin));
         WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
                (RADEON_CUR_LOCK
-            | ((xorigin ? 0 : x) << 16)
-            | (yorigin ? 0 : y)));
+            | (x << 16)
+            | y));
         /* offset is from DISP(2)_BASE_ADDRESS */
         WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
                                       (yorigin * 256)));
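The change alters the contract for cursors hanging off the top or left edge: the position registers now always get clamped, non-negative coordinates, and the overhang is carried entirely by the hotspot registers, instead of the old scheme of writing 0 for the position whenever an origin was set. Reduced to its essentials (a sketch; register names abbreviated, not the full driver path):

    if (x < 0) {
        xorigin = min(-x, CURSOR_WIDTH - 1);   /* hotspot absorbs the overhang */
        x = 0;                                 /* position register stays >= 0 */
    }
    if (y < 0) {
        yorigin = min(-y, CURSOR_HEIGHT - 1);
        y = 0;
    }
    WREG32(CUR_POSITION, (x << 16) | y);             /* hypothetical short names */
    WREG32(CUR_HOT_SPOT, (xorigin << 16) | yorigin);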

View file

@@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
     return backend_map;
 }
 
-static void rv770_program_channel_remap(struct radeon_device *rdev)
-{
-    u32 tcp_chan_steer, mc_shared_chremap, tmp;
-    bool force_no_swizzle;
-
-    switch (rdev->family) {
-    case CHIP_RV770:
-    case CHIP_RV730:
-        force_no_swizzle = false;
-        break;
-    case CHIP_RV710:
-    case CHIP_RV740:
-    default:
-        force_no_swizzle = true;
-        break;
-    }
-
-    tmp = RREG32(MC_SHARED_CHMAP);
-    switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
-    case 0:
-    case 1:
-    default:
-        /* default mapping */
-        mc_shared_chremap = 0x00fac688;
-        break;
-    case 2:
-    case 3:
-        if (force_no_swizzle)
-            mc_shared_chremap = 0x00fac688;
-        else
-            mc_shared_chremap = 0x00bbc298;
-        break;
-    }
-
-    if (rdev->family == CHIP_RV740)
-        tcp_chan_steer = 0x00ef2a60;
-    else
-        tcp_chan_steer = 0x00fac688;
-
-    /* RV770 CE has special chremap setup */
-    if (rdev->pdev->device == 0x944e) {
-        tcp_chan_steer = 0x00b08b08;
-        mc_shared_chremap = 0x00b08b08;
-    }
-
-    WREG32(TCP_CHAN_STEER, tcp_chan_steer);
-    WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
 static void rv770_gpu_init(struct radeon_device *rdev)
 {
     int i, j, num_qd_pipes;
@@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
     WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
     WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
-    rv770_program_channel_remap(rdev);
-
     WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
     WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
     WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

View file

@@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
      * both have been read. So the value read will always be correct.
      * Set BOOT bit to refresh factory tuning values.
      */
-    lis3->read(lis3, CTRL_REG2, &reg);
-    if (lis3->whoami == WAI_12B)
-        reg |= CTRL2_BDU | CTRL2_BOOT;
-    else
-        reg |= CTRL2_BOOT_8B;
-    lis3->write(lis3, CTRL_REG2, reg);
+    if (lis3->pdata) {
+        lis3->read(lis3, CTRL_REG2, &reg);
+        if (lis3->whoami == WAI_12B)
+            reg |= CTRL2_BDU | CTRL2_BOOT;
+        else
+            reg |= CTRL2_BOOT_8B;
+        lis3->write(lis3, CTRL_REG2, reg);
+    }
 
     /* LIS3 power on delay is quite long */
     msleep(lis3->pwron_delay / lis3lv02d_get_odr());

View file

@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
     }
 
 re_arm:
-    queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
+    if (!bond->kill_timers)
+        queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
 out:
     read_unlock(&bond->lock);
 }

View file

@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work)
     }
 
 re_arm:
-    queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
+    if (!bond->kill_timers)
+        queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
 out:
     read_unlock(&bond->lock);
 }

View file

@@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
 
     read_lock(&bond->lock);
 
+    if (bond->kill_timers)
+        goto out;
+
     /* rejoin all groups on bond device */
     __bond_resend_igmp_join_requests(bond->dev);
@@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
             __bond_resend_igmp_join_requests(vlan_dev);
     }
 
-    if (--bond->igmp_retrans > 0)
+    if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
         queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-
+out:
     read_unlock(&bond->lock);
 }
@@ -2538,7 +2541,7 @@ void bond_mii_monitor(struct work_struct *work)
     }
 
 re_arm:
-    if (bond->params.miimon)
+    if (bond->params.miimon && !bond->kill_timers)
         queue_delayed_work(bond->wq, &bond->mii_work,
                    msecs_to_jiffies(bond->params.miimon));
 out:
@@ -2886,7 +2889,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
     }
 
 re_arm:
-    if (bond->params.arp_interval)
+    if (bond->params.arp_interval && !bond->kill_timers)
         queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 out:
     read_unlock(&bond->lock);
@@ -3154,7 +3157,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
     bond_ab_arp_probe(bond);
 
 re_arm:
-    if (bond->params.arp_interval)
+    if (bond->params.arp_interval && !bond->kill_timers)
        queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 out:
     read_unlock(&bond->lock);
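All of the bonding hunks (here and in the two files above) apply the same pattern: every self-re-arming monitor work item now checks bond->kill_timers before calling queue_delayed_work(), closing the race where a work item re-queues itself after teardown has already begun. In isolation, the pattern looks roughly like this (a sketch; interval value hypothetical):

    static void monitor_fn(struct work_struct *work)
    {
        struct bonding *bond = container_of(work, struct bonding,
                                            mii_work.work);
        unsigned long delta = msecs_to_jiffies(100);  /* hypothetical */

        read_lock(&bond->lock);
        if (bond->kill_timers)       /* teardown started: don't even run */
            goto out;

        /* ... periodic monitoring work ... */

        if (!bond->kill_timers)      /* re-check right before re-arming */
            queue_delayed_work(bond->wq, &bond->mii_work, delta);
    out:
        read_unlock(&bond->lock);
    }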

View file

@@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev,
         setup_debugfs(adapter);
     }
 
+    /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+    pdev->needs_freset = 1;
+
     if (is_offload(adapter))
         attach_ulds(adapter);

View file

@@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev)
         netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
                netdev->irq, rc);
         do {
-            rc = h_free_logical_lan(adapter->vdev->unit_address);
-        } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+            lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+        } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 
         goto err_out;
     }

View file

@@ -1199,6 +1199,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
             iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
                   &hw->reg->INT_EN);
             pch_gbe_stop_receive(adapter);
+            int_st |= ioread32(&hw->reg->INT_ST);
+            int_st = int_st & ioread32(&hw->reg->INT_EN);
         }
         if (int_st & PCH_GBE_INT_RX_DMA_ERR)
             adapter->stats.intr_rx_dma_err_count++;
@@ -1218,14 +1220,11 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
             /* Set Pause packet */
             pch_gbe_mac_set_pause_packet(hw);
         }
-        if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
-            == 0) {
-            return IRQ_HANDLED;
-        }
     }
 
     /* When request status is Receive interruption */
-    if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
+    if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
+        (adapter->rx_stop_flag == true)) {
         if (likely(napi_schedule_prep(&adapter->napi))) {
             /* Enable only Rx Descriptor empty */
             atomic_inc(&adapter->irq_sem);
@@ -1385,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
     struct sk_buff *skb;
     unsigned int i;
     unsigned int cleaned_count = 0;
-    bool cleaned = false;
+    bool cleaned = true;
 
     pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
@@ -1396,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
     while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
         pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
-        cleaned = true;
         buffer_info = &tx_ring->buffer_info[i];
         skb = buffer_info->skb;
@@ -1439,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
         tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
 
         /* weight of a sort for tx, to avoid endless transmit cleanup */
-        if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
+        if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
+            cleaned = false;
             break;
+        }
     }
     pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
          cleaned_count);
@@ -2168,7 +2168,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 {
     struct pch_gbe_adapter *adapter =
         container_of(napi, struct pch_gbe_adapter, napi);
-    struct net_device *netdev = adapter->netdev;
     int work_done = 0;
     bool poll_end_flag = false;
     bool cleaned = false;
@@ -2176,33 +2175,32 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
     pr_debug("budget : %d\n", budget);
 
-    /* Keep link state information with original netdev */
-    if (!netif_carrier_ok(netdev)) {
+    pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+    cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
+    if (!cleaned)
+        work_done = budget;
+    /* If no Tx and not enough Rx work done,
+     * exit the polling mode
+     */
+    if (work_done < budget)
         poll_end_flag = true;
-    } else {
-        pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+
+    if (poll_end_flag) {
+        napi_complete(napi);
+        if (adapter->rx_stop_flag) {
+            adapter->rx_stop_flag = false;
+            pch_gbe_start_receive(&adapter->hw);
+        }
+        pch_gbe_irq_enable(adapter);
+    } else
         if (adapter->rx_stop_flag) {
             adapter->rx_stop_flag = false;
             pch_gbe_start_receive(&adapter->hw);
             int_en = ioread32(&adapter->hw.reg->INT_EN);
             iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
                   &adapter->hw.reg->INT_EN);
         }
-        cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
-
-        if (cleaned)
-            work_done = budget;
-        /* If no Tx and not enough Rx work done,
-         * exit the polling mode
-         */
-        if ((work_done < budget) || !netif_running(netdev))
-            poll_end_flag = true;
-    }
-
-    if (poll_end_flag) {
-        napi_complete(napi);
-        pch_gbe_irq_enable(adapter);
-    }
 
     pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
          poll_end_flag, work_done, budget);
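The rewritten poll routine now follows the canonical NAPI contract: report the full budget when TX cleanup still has work left (so the core keeps polling), and only call napi_complete() and re-enable interrupts once the device is genuinely idle. Schematically (a sketch of the contract with hypothetical helper names, not the driver itself):

    static int example_poll(struct napi_struct *napi, int budget)
    {
        int work_done = 0;
        bool tx_fully_cleaned;

        example_clean_rx(&work_done, budget);   /* RX, bounded by budget */
        tx_fully_cleaned = example_clean_tx();  /* TX, bounded by its weight */

        if (!tx_fully_cleaned)
            work_done = budget;     /* TX left over: stay in polling mode */

        if (work_done < budget) {
            napi_complete(napi);    /* idle: leave polling mode ... */
            example_irq_enable();   /* ... and only then unmask IRQs */
        }
        return work_done;
    }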

View file

@@ -589,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
     prune_rx_ts(dp83640);
 
     if (list_empty(&dp83640->rxpool)) {
-        pr_warning("dp83640: rx timestamp pool is empty\n");
+        pr_debug("dp83640: rx timestamp pool is empty\n");
         goto out;
     }
     rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -612,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640,
 
     skb = skb_dequeue(&dp83640->tx_queue);
     if (!skb) {
-        pr_warning("dp83640: have timestamp but tx_queue empty\n");
+        pr_debug("dp83640: have timestamp but tx_queue empty\n");
         return;
     }
     ns = phy2txts(phy_txts);

View file

@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
     xenvif_get(vif);
 
     rtnl_lock();
-    if (netif_running(vif->dev))
-        xenvif_up(vif);
     if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
         dev_set_mtu(vif->dev, ETH_DATA_LEN);
     netdev_update_features(vif->dev);
     netif_carrier_on(vif->dev);
+    if (netif_running(vif->dev))
+        xenvif_up(vif);
     rtnl_unlock();
 
     return 0;

View file

@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
 
-enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
 
 /*
  * The default CLS is used if arch didn't set CLS explicitly and not
@@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str)
                 pci_hotplug_io_size = memparse(str + 9, &str);
             } else if (!strncmp(str, "hpmemsize=", 10)) {
                 pci_hotplug_mem_size = memparse(str + 10, &str);
+            } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
+                pcie_bus_config = PCIE_BUS_TUNE_OFF;
             } else if (!strncmp(str, "pcie_bus_safe", 13)) {
                 pcie_bus_config = PCIE_BUS_SAFE;
             } else if (!strncmp(str, "pcie_bus_perf", 13)) {
                 pcie_bus_config = PCIE_BUS_PERFORMANCE;
+            } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
+                pcie_bus_config = PCIE_BUS_PEER2PEER;
             } else {
                 printk(KERN_ERR "PCI: Unknown option `%s'\n",
                         str);
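Together with the reordered enum pcie_bus_config_types later in this commit, this makes all four MPS (Max Payload Size) strategies selectable at boot, and moves the compiled-in default from PCIE_BUS_SAFE back to PCIE_BUS_TUNE_OFF, meaning firmware-configured MPS values are left alone unless explicitly overridden. The resulting kernel parameters (spellings taken from the strncmp() calls above):

    pci=pcie_bus_tune_off    leave MPS as firmware set it (the new default behaviour)
    pci=pcie_bus_safe        use the largest MPS safe for the whole subtree
    pci=pcie_bus_perf        tune MPS (and read-request size) for performance
    pci=pcie_bus_peer2peer   force the smallest MPS, safe for peer-to-peer DMA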

View file

@@ -1458,12 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
  */
 void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
 {
-    u8 smpss = mpss;
+    u8 smpss;
 
     if (!pci_is_pcie(bus->self))
         return;
 
+    if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
+        return;
+
+    /* FIXME - Peer to peer DMA is possible, though the endpoint would need
+     * to be aware to the MPS of the destination. To work around this,
+     * simply force the MPS of the entire system to the smallest possible.
+     */
+    if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+        smpss = 0;
+
     if (pcie_bus_config == PCIE_BUS_SAFE) {
+        smpss = mpss;
+
         pcie_find_smpss(bus->self, &smpss);
         pci_walk_bus(bus, pcie_find_smpss, &smpss);
     }

View file

@@ -50,6 +50,8 @@
 #define PCH_RX_THOLD        7
 #define PCH_RX_THOLD_MAX    15
 
+#define PCH_TX_THOLD        2
+
 #define PCH_MAX_BAUDRATE    5000000
 #define PCH_MAX_FIFO_DEPTH  16
@@ -58,6 +60,7 @@
 #define PCH_SLEEP_TIME      10
 
 #define SSN_LOW             0x02U
+#define SSN_HIGH            0x03U
 #define SSN_NO_CONTROL      0x00U
 #define PCH_MAX_CS          0xFF
 #define PCI_DEVICE_ID_GE_SPI 0x8816
@@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
     /* if transfer complete interrupt */
     if (reg_spsr_val & SPSR_FI_BIT) {
-        if (tx_index < bpw_len)
+        if ((tx_index == bpw_len) && (rx_index == tx_index)) {
+            /* disable interrupts */
+            pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+
+            /* transfer is completed;
+               inform pch_spi_process_messages */
+            data->transfer_complete = true;
+            data->transfer_active = false;
+            wake_up(&data->wait);
+        } else {
             dev_err(&data->master->dev,
                 "%s : Transfer is not completed", __func__);
-
-        /* disable interrupts */
-        pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
-
-        /* transfer is completed;inform pch_spi_process_messages */
-        data->transfer_complete = true;
-        data->transfer_active = false;
-        wake_up(&data->wait);
+        }
     }
 }
@@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id)
             "%s returning due to suspend\n", __func__);
         return IRQ_NONE;
     }
-    if (data->use_dma)
-        return IRQ_NONE;
 
     io_remap_addr = data->io_remap_addr;
     spsr = io_remap_addr + PCH_SPSR;
     reg_spsr_val = ioread32(spsr);
 
-    if (reg_spsr_val & SPSR_ORF_BIT)
-        dev_err(&board_dat->pdev->dev, "%s Over run error", __func__);
+    if (reg_spsr_val & SPSR_ORF_BIT) {
+        dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
+        if (data->current_msg->complete != 0) {
+            data->transfer_complete = true;
+            data->current_msg->status = -EIO;
+            data->current_msg->complete(data->current_msg->context);
+            data->bcurrent_msg_processing = false;
+            data->current_msg = NULL;
+            data->cur_trans = NULL;
+        }
+    }
+
+    if (data->use_dma)
+        return IRQ_NONE;
 
     /* Check if the interrupt is for SPI device */
     if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
@@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
 
     wait_event_interruptible(data->wait, data->transfer_complete);
 
-    pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
-    dev_dbg(&data->master->dev,
-        "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
-
     /* clear all interrupts */
     pch_spi_writereg(data->master, PCH_SPSR,
              pch_spi_readreg(data->master, PCH_SPSR));
@@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
     }
 }
 
-static void pch_spi_start_transfer(struct pch_spi_data *data)
+static int pch_spi_start_transfer(struct pch_spi_data *data)
 {
     struct pch_spi_dma_ctrl *dma;
     unsigned long flags;
+    int rtn;
 
     dma = &data->dma;
@@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
        initiating the transfer. */
     dev_dbg(&data->master->dev,
         "%s:waiting for transfer to get over\n", __func__);
-    wait_event_interruptible(data->wait, data->transfer_complete);
+    rtn = wait_event_interruptible_timeout(data->wait,
+                           data->transfer_complete,
+                           msecs_to_jiffies(2 * HZ));
 
     dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
                 DMA_FROM_DEVICE);
+
+    dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
+                DMA_FROM_DEVICE);
+    memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
+
     async_tx_ack(dma->desc_rx);
     async_tx_ack(dma->desc_tx);
     kfree(dma->sg_tx_p);
     kfree(dma->sg_rx_p);
 
     spin_lock_irqsave(&data->lock, flags);
-    pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
-    dev_dbg(&data->master->dev,
-        "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
 
     /* clear fifo threshold, disable interrupts, disable SPI transfer */
     pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
@@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
     pch_spi_clear_fifo(data->master);
 
     spin_unlock_irqrestore(&data->lock, flags);
+
+    return rtn;
 }
 
 static void pch_dma_rx_complete(void *arg)
@@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
     /* set receive fifo threshold and transmit fifo threshold */
     pch_spi_setclr_reg(data->master, PCH_SPCR,
                ((size - 1) << SPCR_RFIC_FIELD) |
-               ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) <<
-                SPCR_TFIC_FIELD),
+               (PCH_TX_THOLD << SPCR_TFIC_FIELD),
                MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
 
     spin_unlock_irqrestore(&data->lock, flags);
@@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
     /* offset, length setting */
     sg = dma->sg_rx_p;
     for (i = 0; i < num; i++, sg++) {
-        if (i == 0) {
-            sg->offset = 0;
+        if (i == (num - 2)) {
+            sg->offset = size * i;
+            sg->offset = sg->offset * (*bpw / 8);
             sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
                     sg->offset);
             sg_dma_len(sg) = rem;
+        } else if (i == (num - 1)) {
+            sg->offset = size * (i - 1) + rem;
+            sg->offset = sg->offset * (*bpw / 8);
+            sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
+                    sg->offset);
+            sg_dma_len(sg) = size;
         } else {
-            sg->offset = rem + size * (i - 1);
+            sg->offset = size * i;
             sg->offset = sg->offset * (*bpw / 8);
             sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
                     sg->offset);
@@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
     dma->desc_rx = desc_rx;
 
     /* TX */
+    if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
+        num = data->bpw_len / PCH_DMA_TRANS_SIZE;
+        size = PCH_DMA_TRANS_SIZE;
+        rem = 16;
+    } else {
+        num = 1;
+        size = data->bpw_len;
+        rem = data->bpw_len;
+    }
+
     dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
     sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
     /* offset, length setting */
@@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
     if (data->use_dma)
         pch_spi_request_dma(data,
                     data->current_msg->spi->bits_per_word);
+    pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
     do {
         /* If we are already processing a message get the next
            transfer structure from the message otherwise retrieve
@@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
 
         if (data->use_dma) {
             pch_spi_handle_dma(data, &bpw);
-            pch_spi_start_transfer(data);
+            if (!pch_spi_start_transfer(data))
+                goto out;
             pch_spi_copy_rx_data_for_dma(data, bpw);
         } else {
             pch_spi_set_tx(data, &bpw);
@@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
 
     } while (data->cur_trans != NULL);
 
+out:
+    pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
     if (data->use_dma)
         pch_spi_release_dma(data);
 }
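One reading of the SSNXCR hunks taken together: chip-select handling moves from per-transfer to per-message scope. SSN_NO_CONTROL is now written once before the transfer loop in pch_spi_process_messages() (handing SSN to the controller for the whole message), the per-transfer writes in pch_spi_set_ir() and pch_spi_start_transfer() are gone, and the new SSN_HIGH define forces chip-select high at the common exit. The new out: label exists precisely so the two-second wait_event_interruptible_timeout() failure path still deasserts SSN. In outline:

    pch_spi_writereg(master, PCH_SSNXCR, SSN_NO_CONTROL);  /* message start */
    do {
        if (!pch_spi_start_transfer(data))  /* 0 means the DMA wait timed out */
            goto out;                       /* abort the message, but ...    */
        /* ... next transfer ... */
    } while (data->cur_trans != NULL);
    out:
    pch_spi_writereg(master, PCH_SSNXCR, SSN_HIGH);        /* ... still release CS */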

View file

@@ -621,8 +621,9 @@ struct pci_driver {
 extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
 
 enum pcie_bus_config_types {
-    PCIE_BUS_PERFORMANCE,
+    PCIE_BUS_TUNE_OFF,
     PCIE_BUS_SAFE,
+    PCIE_BUS_PERFORMANCE,
     PCIE_BUS_PEER2PEER,
 };

View file

@@ -51,6 +51,7 @@
 #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
 
 #define PTP_EV_PORT 319
+#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
 
 #define OFF_ETYPE 12
 #define OFF_IHL   14
@@ -116,14 +117,20 @@ static inline int ptp_filter_init(struct sock_filter *f, int len)
     {OP_OR,   0, 0,  PTP_CLASS_IPV6 },       /*             */ \
     {OP_RETA, 0, 0,  0 },                    /*             */ \
 /*L3x*/ {OP_RETK, 0, 0,  PTP_CLASS_NONE },       /*             */ \
-/*L40*/ {OP_JEQ,  0, 6,  ETH_P_8021Q },          /* f goto L50  */ \
+/*L40*/ {OP_JEQ,  0, 9,  ETH_P_8021Q },          /* f goto L50  */ \
     {OP_LDH,  0, 0,  OFF_ETYPE + 4 },        /*             */ \
-    {OP_JEQ,  0, 9,  ETH_P_1588 },           /* f goto L60  */ \
+    {OP_JEQ,  0, 15, ETH_P_1588 },           /* f goto L60  */ \
+    {OP_LDB,  0, 0,  ETH_HLEN + VLAN_HLEN }, /*             */ \
+    {OP_AND,  0, 0,  PTP_GEN_BIT },          /*             */ \
+    {OP_JEQ,  0, 12, 0 },                    /* f goto L6x  */ \
     {OP_LDH,  0, 0,  ETH_HLEN + VLAN_HLEN }, /*             */ \
     {OP_AND,  0, 0,  PTP_CLASS_VMASK },      /*             */ \
     {OP_OR,   0, 0,  PTP_CLASS_VLAN },       /*             */ \
     {OP_RETA, 0, 0,  0 },                    /*             */ \
-/*L50*/ {OP_JEQ,  0, 4,  ETH_P_1588 },           /* f goto L61  */ \
+/*L50*/ {OP_JEQ,  0, 7,  ETH_P_1588 },           /* f goto L61  */ \
+    {OP_LDB,  0, 0,  ETH_HLEN },             /*             */ \
+    {OP_AND,  0, 0,  PTP_GEN_BIT },          /*             */ \
+    {OP_JEQ,  0, 4,  0 },                    /* f goto L6x  */ \
     {OP_LDH,  0, 0,  ETH_HLEN },             /*             */ \
     {OP_AND,  0, 0,  PTP_CLASS_VMASK },      /*             */ \
     {OP_OR,   0, 0,  PTP_CLASS_L2 },         /*             */ \
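Each new OP_LDB/OP_AND/OP_JEQ triplet loads the first byte of the PTP header, masks it with PTP_GEN_BIT, and bails out to the "no class" exit when the bit is set, so only event messages (the ones that actually carry timestamps) are classified; the adjusted jump offsets in the surrounding OP_JEQ entries account for the three extra instructions per branch. In plain C the added test reads roughly:

    /* what the new triplet checks, as a sketch (L2, non-VLAN case) */
    u8 first_byte = pkt[ETH_HLEN];   /* first byte of the PTP header */
    if (first_byte & PTP_GEN_BIT)    /* general message: no timestamp */
        return PTP_CLASS_NONE;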

View file

@@ -344,6 +344,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
     }
 }
 
+static void bcm_tx_start_timer(struct bcm_op *op)
+{
+    if (op->kt_ival1.tv64 && op->count)
+        hrtimer_start(&op->timer,
+                  ktime_add(ktime_get(), op->kt_ival1),
+                  HRTIMER_MODE_ABS);
+    else if (op->kt_ival2.tv64)
+        hrtimer_start(&op->timer,
+                  ktime_add(ktime_get(), op->kt_ival2),
+                  HRTIMER_MODE_ABS);
+}
+
 static void bcm_tx_timeout_tsklet(unsigned long data)
 {
     struct bcm_op *op = (struct bcm_op *)data;
@@ -365,26 +377,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
             bcm_send_to_user(op, &msg_head, NULL, 0);
         }
-    }
-
-    if (op->kt_ival1.tv64 && (op->count > 0)) {
-
-        /* send (next) frame */
         bcm_can_tx(op);
-        hrtimer_start(&op->timer,
-                  ktime_add(ktime_get(), op->kt_ival1),
-                  HRTIMER_MODE_ABS);
 
-    } else {
-        if (op->kt_ival2.tv64) {
-
-            /* send (next) frame */
-            bcm_can_tx(op);
-            hrtimer_start(&op->timer,
-                      ktime_add(ktime_get(), op->kt_ival2),
-                      HRTIMER_MODE_ABS);
-        }
-    }
+    } else if (op->kt_ival2.tv64)
+        bcm_can_tx(op);
+
+    bcm_tx_start_timer(op);
 }
 
 /*
@@ -964,23 +962,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
         hrtimer_cancel(&op->timer);
     }
 
-    if ((op->flags & STARTTIMER) &&
-        ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
+    if (op->flags & STARTTIMER) {
+        hrtimer_cancel(&op->timer);
         /* spec: send can_frame when starting timer */
         op->flags |= TX_ANNOUNCE;
-
-        if (op->kt_ival1.tv64 && (op->count > 0)) {
-            /* op->count-- is done in bcm_tx_timeout_handler */
-            hrtimer_start(&op->timer, op->kt_ival1,
-                      HRTIMER_MODE_REL);
-        } else
-            hrtimer_start(&op->timer, op->kt_ival2,
-                      HRTIMER_MODE_REL);
     }
 
-    if (op->flags & TX_ANNOUNCE)
+    if (op->flags & TX_ANNOUNCE) {
         bcm_can_tx(op);
+        if (op->count)
+            op->count--;
+    }
+
+    if (op->flags & STARTTIMER)
+        bcm_tx_start_timer(op);
 
     return msg_head->nframes * CFSIZ + MHSIZ;
 }

View file

@@ -1383,6 +1383,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
         newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
 #endif
 
+        newnp->ipv6_ac_list = NULL;
+        newnp->ipv6_fl_list = NULL;
         newnp->pktoptions  = NULL;
         newnp->opt         = NULL;
         newnp->mcast_oif   = inet6_iif(skb);
@@ -1447,6 +1449,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        First: no IPv4 options.
      */
     newinet->inet_opt = NULL;
+    newnp->ipv6_ac_list = NULL;
     newnp->ipv6_fl_list = NULL;
 
     /* Clone RX bits */

View file

@@ -961,7 +961,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
     return 0;
 
 drop_n_acct:
-    po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
+    spin_lock(&sk->sk_receive_queue.lock);
+    po->stats.tp_drops++;
+    atomic_inc(&sk->sk_drops);
+    spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
     if (skb_head != skb->data && skb_shared(skb)) {
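tp_drops stops piggybacking on the atomic sk_drops counter and is instead bumped under sk_receive_queue.lock, the lock the statistics reader is expected to hold when copying the counters out, while sk_drops keeps its own atomic increment. A sketch of the resulting pairing (reader side abbreviated):

    /* writer: the drop path */
    spin_lock(&sk->sk_receive_queue.lock);
    po->stats.tp_drops++;         /* serialized by the queue lock */
    atomic_inc(&sk->sk_drops);    /* generic socket counter stays atomic */
    spin_unlock(&sk->sk_receive_queue.lock);

    /* reader: copies po->stats while holding the same queue lock */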

View file

@@ -84,7 +84,8 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
             struct list_head *unmap_list,
-            struct list_head *kill_list);
+            struct list_head *kill_list,
+            int *unpinned);
 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
 
 static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
@@ -499,7 +500,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
     LIST_HEAD(unmap_list);
     LIST_HEAD(kill_list);
     unsigned long flags;
-    unsigned int nfreed = 0, ncleaned = 0, free_goal;
+    unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal;
     int ret = 0;
 
     rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);
@@ -524,7 +525,8 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
      * will be destroyed by the unmap function.
      */
     if (!list_empty(&unmap_list)) {
-        ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list);
+        ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
+                             &kill_list, &unpinned);
         /* If we've been asked to destroy all MRs, move those
          * that were simply cleaned to the kill list */
         if (free_all)
@@ -548,6 +550,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
         spin_unlock_irqrestore(&pool->list_lock, flags);
     }
 
+    atomic_sub(unpinned, &pool->free_pinned);
     atomic_sub(ncleaned, &pool->dirty_count);
     atomic_sub(nfreed, &pool->item_count);
 
@@ -828,7 +831,8 @@ static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
                 struct list_head *unmap_list,
-                struct list_head *kill_list)
+                struct list_head *kill_list,
+                int *unpinned)
 {
     struct rds_iw_mapping *mapping, *next;
     unsigned int ncleaned = 0;
@@ -855,6 +859,7 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
 
         spin_lock_irqsave(&pool->list_lock, flags);
         list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
+            *unpinned += mapping->m_sg.len;
             list_move(&mapping->m_list, &laundered);
             ncleaned++;
         }

View file

@@ -1454,8 +1454,8 @@ static int wm8753_probe(struct snd_soc_codec *codec)
     /* set the update bits */
     snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100);
     snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100);
-    snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100);
-    snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100);
+    snd_soc_update_bits(codec, WM8753_LADC, 0x0100, 0x0100);
+    snd_soc_update_bits(codec, WM8753_RADC, 0x0100, 0x0100);
     snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100);
     snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100);
     snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100);

View file

@@ -449,7 +449,7 @@ int __devinit omap_mcpdm_probe(struct platform_device *pdev)
     return ret;
 }
 
-int __devexit omap_mcpdm_remove(struct platform_device *pdev)
+int omap_mcpdm_remove(struct platform_device *pdev)
 {
     struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev);

View file

@@ -150,4 +150,4 @@ extern int omap_mcpdm_request(void);
 extern void omap_mcpdm_free(void);
 extern int omap_mcpdm_set_offset(int offset1, int offset2);
 int __devinit omap_mcpdm_probe(struct platform_device *pdev);
-int __devexit omap_mcpdm_remove(struct platform_device *pdev);
+int omap_mcpdm_remove(struct platform_device *pdev);

View file

@@ -196,20 +196,20 @@ static int zylonite_probe(struct snd_soc_card *card)
     if (clk_pout) {
         pout = clk_get(NULL, "CLK_POUT");
         if (IS_ERR(pout)) {
-            dev_err(&pdev->dev, "Unable to obtain CLK_POUT: %ld\n",
+            dev_err(card->dev, "Unable to obtain CLK_POUT: %ld\n",
                 PTR_ERR(pout));
             return PTR_ERR(pout);
         }
 
         ret = clk_enable(pout);
         if (ret != 0) {
-            dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n",
+            dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
                 ret);
             clk_put(pout);
             return ret;
         }
 
-        dev_dbg(&pdev->dev, "MCLK enabled at %luHz\n",
+        dev_dbg(card->dev, "MCLK enabled at %luHz\n",
             clk_get_rate(pout));
     }
 
@@ -241,7 +241,7 @@ static int zylonite_resume_pre(struct snd_soc_card *card)
     if (clk_pout) {
         ret = clk_enable(pout);
         if (ret != 0)
-            dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n",
+            dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
                 ret);
     }