Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

commit 7fcdf327be
Linus Torvalds, 2006-01-31 10:21:13 -08:00
13 changed files with 122 additions and 60 deletions


@@ -910,16 +910,18 @@ core99_gmac_phy_reset(struct device_node *node, long param, long value)
 	    macio->type != macio_intrepid)
 		return -ENODEV;
 	printk(KERN_DEBUG "Hard reset of PHY chip ...\n");
 	LOCK(flags);
 	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
 	(void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);
 	UNLOCK(flags);
-	mdelay(10);
+	msleep(10);
 	LOCK(flags);
 	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
 		   KEYLARGO_GPIO_OUTOUT_DATA);
 	UNLOCK(flags);
-	mdelay(10);
+	msleep(10);
 	return 0;
 }
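
The two 10 ms waits above sit outside the LOCK()/UNLOCK() pairs, which is why the busy-waiting mdelay() can become a sleeping msleep(). A rough userspace sketch of that pattern; take_lock(), drop_lock(), gpio_out() and sleep_ms() are hypothetical stand-ins, not kernel APIs:

/* Sketch only: register writes happen under the lock, the settle time does
 * not, so the delay may sleep instead of spinning.  Values are arbitrary.
 */
#include <stdio.h>

static void take_lock(void) { }
static void drop_lock(void) { }
static void gpio_out(unsigned int v) { printf("GPIO <- 0x%02x\n", v); }
static void sleep_ms(int ms)         { printf("sleep %d ms (may schedule)\n", ms); }

int main(void)
{
	take_lock();
	gpio_out(0x04);		/* assert the reset line */
	drop_lock();
	sleep_ms(10);		/* safe to sleep: lock not held */

	take_lock();
	gpio_out(0x01);		/* release the reset line */
	drop_lock();
	sleep_ms(10);
	return 0;
}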


@@ -1653,36 +1653,40 @@ static void gem_init_rings(struct gem *gp)
 /* Init PHY interface and start link poll state machine */
 static void gem_init_phy(struct gem *gp)
 {
-	u32 mifcfg;
+	u32 mif_cfg;
 	/* Revert MIF CFG setting done on stop_phy */
-	mifcfg = readl(gp->regs + MIF_CFG);
-	mifcfg &= ~MIF_CFG_BBMODE;
-	writel(mifcfg, gp->regs + MIF_CFG);
+	mif_cfg = readl(gp->regs + MIF_CFG);
+	mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
+	mif_cfg |= MIF_CFG_MDI0;
+	writel(mif_cfg, gp->regs + MIF_CFG);
 	writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
 	writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
 	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
 		int i;
 		u16 ctrl;
 		/* Those delay sucks, the HW seem to love them though, I'll
 		 * serisouly consider breaking some locks here to be able
 		 * to schedule instead
 		 */
 		for (i = 0; i < 3; i++) {
 #ifdef CONFIG_PPC_PMAC
 			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
 			msleep(20);
 #endif
-			/* Some PHYs used by apple have problem getting back to us,
-			 * we do an additional reset here
+			/* Some PHYs used by apple have problem getting back
+			 * to us, we do an additional reset here
 			 */
 			phy_write(gp, MII_BMCR, BMCR_RESET);
 			msleep(20);
-			if (phy_read(gp, MII_BMCR) != 0xffff)
+			for (i = 0; i < 50; i++) {
+				if ((phy_read(gp, MII_BMCR) & BMCR_RESET) == 0)
 					break;
-			if (i == 2)
+				msleep(10);
+			}
+			if (i == 50)
 				printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
 				       gp->dev->name);
 		}
 		/* Make sure isolate is off */
 		ctrl = phy_read(gp, MII_BMCR);
 		if (ctrl & BMCR_ISOLATE)
 			phy_write(gp, MII_BMCR, ctrl & ~BMCR_ISOLATE);
 	}
 	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
@@ -2119,7 +2123,7 @@ static void gem_reinit_chip(struct gem *gp)
 /* Must be invoked with no lock held. */
 static void gem_stop_phy(struct gem *gp, int wol)
 {
-	u32 mifcfg;
+	u32 mif_cfg;
 	unsigned long flags;
 	/* Let the chip settle down a bit, it seems that helps
@@ -2130,9 +2134,9 @@ static void gem_stop_phy(struct gem *gp, int wol)
 	/* Make sure we aren't polling PHY status change. We
 	 * don't currently use that feature though
 	 */
-	mifcfg = readl(gp->regs + MIF_CFG);
-	mifcfg &= ~MIF_CFG_POLL;
-	writel(mifcfg, gp->regs + MIF_CFG);
+	mif_cfg = readl(gp->regs + MIF_CFG);
+	mif_cfg &= ~MIF_CFG_POLL;
+	writel(mif_cfg, gp->regs + MIF_CFG);
 	if (wol && gp->has_wol) {
 		unsigned char *e = &gp->dev->dev_addr[0];
@@ -2182,7 +2186,8 @@ static void gem_stop_phy(struct gem *gp, int wol)
 	/* According to Apple, we must set the MDIO pins to this begnign
 	 * state or we may 1) eat more current, 2) damage some PHYs
 	 */
-	writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
+	mif_cfg = 0;
+	writel(mif_cfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
 	writel(0, gp->regs + MIF_BBCLK);
 	writel(0, gp->regs + MIF_BBDATA);
 	writel(0, gp->regs + MIF_BBOENAB);
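
The gem_init_phy() change above stops guessing how long the PHY reset takes: after writing BMCR_RESET it polls for the self-clearing bit with a bounded budget (up to 50 tries, 10 ms apart) and warns only when the budget runs out. A minimal userspace sketch of that poll-with-timeout pattern; mii_read(), mii_write() and sleep_ms() are hypothetical stand-ins for the driver's phy_read()/phy_write()/msleep():

/* Sketch only: the "hardware" here is a variable whose RESET bit clears on
 * the first read, which stands in for the PHY finishing its reset.
 */
#include <stdio.h>

#define BMCR_RESET 0x8000

static unsigned int fake_bmcr;		/* pretend PHY control register */

static void mii_write(unsigned int val) { fake_bmcr = val; }
static unsigned int mii_read(void)      { fake_bmcr &= ~BMCR_RESET; return fake_bmcr; }
static void sleep_ms(int ms)            { (void)ms; }

int main(void)
{
	int i;

	mii_write(BMCR_RESET);			/* start the reset */
	for (i = 0; i < 50; i++) {		/* bound the wait: 50 * 10 ms */
		if ((mii_read() & BMCR_RESET) == 0)
			break;			/* PHY cleared the bit: done */
		sleep_ms(10);
	}
	if (i == 50)
		fprintf(stderr, "PHY not responding\n");
	else
		printf("PHY reset done after %d poll(s)\n", i);
	return 0;
}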


@@ -19,7 +19,21 @@ struct xt_get_revision
 /* For standard target */
 #define XT_RETURN (-NF_REPEAT - 1)
-#define XT_ALIGN(s) (((s) + (__alignof__(u_int64_t)-1)) & ~(__alignof__(u_int64_t)-1))
+/* this is a dummy structure to find out the alignment requirement for a struct
+ * containing all the fundamental data types that are used in ipt_entry,
+ * ip6t_entry and arpt_entry. This sucks, and it is a hack. It will be my
+ * personal pleasure to remove it -HW
+ */
+struct _xt_align
+{
+	u_int8_t u8;
+	u_int16_t u16;
+	u_int32_t u32;
+	u_int64_t u64;
+};
+#define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) \
+		     & ~(__alignof__(struct _xt_align)-1))
 /* Standard return verdict, or do jump. */
 #define XT_STANDARD_TARGET ""
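
The replacement XT_ALIGN above derives its rounding from the alignment of a struct that holds every fundamental integer type used in the table entries, rather than hard-coding __alignof__(u_int64_t). A standalone sketch of the same rounding, using local names (xt_align_demo, DEMO_ALIGN) rather than the kernel's; on most ABIs the struct's alignment is 8, so 13 rounds up to 16:

#include <stdio.h>
#include <stdint.h>

struct xt_align_demo {
	uint8_t  u8;
	uint16_t u16;
	uint32_t u32;
	uint64_t u64;
};

/* Round s up to the next multiple of the struct's alignment. */
#define DEMO_ALIGN(s) (((s) + (__alignof__(struct xt_align_demo) - 1)) \
		       & ~(__alignof__(struct xt_align_demo) - 1))

int main(void)
{
	printf("alignment = %zu\n", (size_t)__alignof__(struct xt_align_demo));
	printf("DEMO_ALIGN(13) = %zu\n", (size_t)DEMO_ALIGN(13));
	return 0;
}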


@@ -2543,13 +2543,14 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
 		case SIOCBONDENSLAVE:
 		case SIOCBONDRELEASE:
 		case SIOCBONDSETHWADDR:
-		case SIOCBONDSLAVEINFOQUERY:
-		case SIOCBONDINFOQUERY:
 		case SIOCBONDCHANGEACTIVE:
 		case SIOCBRADDIF:
 		case SIOCBRDELIF:
 			if (!capable(CAP_NET_ADMIN))
 				return -EPERM;
+			/* fall through */
+		case SIOCBONDSLAVEINFOQUERY:
+		case SIOCBONDINFOQUERY:
 			dev_load(ifr.ifr_name);
 			rtnl_lock();
 			ret = dev_ifsioc(&ifr, cmd);
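
The reordering above moves the two read-only bonding queries (SIOCBONDSLAVEINFOQUERY, SIOCBONDINFOQUERY) below the CAP_NET_ADMIN check, so only the state-changing ioctls take the privileged branch while all of them still share the dev_load()/dev_ifsioc() tail through the explicit fall-through. A small sketch of that switch layout; the CMD_* names and is_admin() are hypothetical, not kernel APIs:

#include <stdio.h>
#include <errno.h>

enum cmd { CMD_SET_ACTIVE, CMD_ENSLAVE, CMD_SLAVE_QUERY, CMD_INFO_QUERY };

static int is_admin(void) { return 0; }	/* pretend the caller is unprivileged */

static int dispatch(enum cmd cmd)
{
	switch (cmd) {
	case CMD_SET_ACTIVE:
	case CMD_ENSLAVE:
		if (!is_admin())
			return -EPERM;	/* privileged commands stop here */
		/* fall through */
	case CMD_SLAVE_QUERY:
	case CMD_INFO_QUERY:
		return 0;		/* common handling path */
	}
	return -EINVAL;
}

int main(void)
{
	printf("query: %d, enslave: %d\n",
	       dispatch(CMD_INFO_QUERY), dispatch(CMD_ENSLAVE));
	return 0;
}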


@@ -135,13 +135,15 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 			    int fclone)
 {
+	kmem_cache_t *cache;
 	struct skb_shared_info *shinfo;
 	struct sk_buff *skb;
 	u8 *data;
+	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 	/* Get the HEAD */
-	skb = kmem_cache_alloc(fclone ? skbuff_fclone_cache : skbuff_head_cache,
-			       gfp_mask & ~__GFP_DMA);
+	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
 	if (!skb)
 		goto out;
@@ -180,7 +182,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 out:
 	return skb;
 nodata:
-	kmem_cache_free(skbuff_head_cache, skb);
+	kmem_cache_free(cache, skb);
 	skb = NULL;
 	goto out;
 }
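
The fix above records which slab cache the skb head came from, so the nodata error path frees it back to that same cache; previously an fclone allocation whose data allocation failed was returned to skbuff_head_cache. A userspace sketch of the record-the-cache pattern; pool_alloc()/pool_free() are hypothetical stand-ins for kmem_cache_alloc()/kmem_cache_free():

#include <stdio.h>
#include <stdlib.h>

struct pool { const char *name; };

static struct pool head_pool   = { "skbuff_head_cache" };
static struct pool fclone_pool = { "skbuff_fclone_cache" };

static void *pool_alloc(struct pool *p, size_t sz)
{
	printf("alloc from %s\n", p->name);
	return malloc(sz);
}

static void pool_free(struct pool *p, void *obj)
{
	printf("free to   %s\n", p->name);
	free(obj);
}

static void *alloc_obj(int fclone, int later_step_fails)
{
	struct pool *cache = fclone ? &fclone_pool : &head_pool;
	void *obj = pool_alloc(cache, 64);

	if (!obj)
		return NULL;
	if (later_step_fails) {
		pool_free(cache, obj);	/* same cache we allocated from */
		return NULL;
	}
	return obj;
}

int main(void)
{
	alloc_obj(1, 1);	/* fclone object, simulated follow-up failure */
	return 0;
}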


@@ -970,7 +970,6 @@ int igmp_rcv(struct sk_buff *skb)
 	case IGMP_MTRACE_RESP:
 		break;
 	default:
-		NETDEBUG(KERN_DEBUG "New IGMP type=%d, why we do not know about it?\n", ih->type);
 	}
 drop:


@@ -230,7 +230,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
 				tp->snd_cwnd++;
 			tp->snd_cwnd_cnt = 0;
-			ca->ccount++;
 		}
 	}
 }


@@ -1252,8 +1252,7 @@ int igmp6_event_query(struct sk_buff *skb)
 		}
 	} else {
 		for (ma = idev->mc_list; ma; ma=ma->next) {
-			if (group_type != IPV6_ADDR_ANY &&
-			    !ipv6_addr_equal(group, &ma->mca_addr))
+			if (!ipv6_addr_equal(group, &ma->mca_addr))
 				continue;
 			spin_lock_bh(&ma->mca_lock);
 			if (ma->mca_flags & MAF_TIMER_RUNNING) {
@@ -1271,7 +1270,6 @@ int igmp6_event_query(struct sk_buff *skb)
 			    mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
 				igmp6_group_queried(ma, max_delay);
 			spin_unlock_bh(&ma->mca_lock);
-			if (group_type != IPV6_ADDR_ANY)
 			break;
 		}
 	}
@@ -1351,7 +1349,7 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
 		 * in all filters
 		 */
 		if (psf->sf_count[MCAST_INCLUDE])
-			return 0;
+			return type == MLD2_MODE_IS_INCLUDE;
 		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
 			psf->sf_count[MCAST_EXCLUDE];
 	}
@@ -1966,7 +1964,7 @@ static void sf_markstate(struct ifmcaddr6 *pmc)
 static int sf_setstate(struct ifmcaddr6 *pmc)
 {
-	struct ip6_sf_list *psf;
+	struct ip6_sf_list *psf, *dpsf;
 	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
 	int qrv = pmc->idev->mc_qrv;
 	int new_in, rv;
@@ -1978,10 +1976,50 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
 				!psf->sf_count[MCAST_INCLUDE];
 		} else
 			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
-		if (new_in != psf->sf_oldin) {
+		if (new_in) {
+			if (!psf->sf_oldin) {
+				struct ip6_sf_list *prev = 0;
+				for (dpsf=pmc->mca_tomb; dpsf;
+				     dpsf=dpsf->sf_next) {
+					if (ipv6_addr_equal(&dpsf->sf_addr,
+					    &psf->sf_addr))
+						break;
+					prev = dpsf;
+				}
+				if (dpsf) {
+					if (prev)
+						prev->sf_next = dpsf->sf_next;
+					else
+						pmc->mca_tomb = dpsf->sf_next;
+					kfree(dpsf);
+				}
 				psf->sf_crcount = qrv;
 				rv++;
+			}
+		} else if (psf->sf_oldin) {
+			psf->sf_crcount = 0;
+			/*
+			 * add or update "delete" records if an active filter
+			 * is now inactive
+			 */
+			for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
+				if (ipv6_addr_equal(&dpsf->sf_addr,
+				    &psf->sf_addr))
+					break;
+			if (!dpsf) {
+				dpsf = (struct ip6_sf_list *)
+					kmalloc(sizeof(*dpsf), GFP_ATOMIC);
+				if (!dpsf)
+					continue;
+				*dpsf = *psf;
+				/* pmc->mca_lock held by callers */
+				dpsf->sf_next = pmc->mca_tomb;
+				pmc->mca_tomb = dpsf;
+			}
+			dpsf->sf_crcount = qrv;
+			rv++;
 		}
 	}
 	return rv;
 }
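
The bulk of the sf_setstate() change above maintains per-source "delete" records on the pmc->mca_tomb list: when a previously inactive source becomes active again its tomb entry is unlinked and freed, and when an active source goes inactive a record is added (or its retransmit count refreshed). The list handling itself is ordinary singly-linked unlink/prepend; a self-contained sketch with a hypothetical struct rec standing in for struct ip6_sf_list and a plain int key standing in for the address:

#include <stdio.h>
#include <stdlib.h>

struct rec {
	int key;
	struct rec *next;
};

static struct rec *tomb;		/* list head, like pmc->mca_tomb */

static void tomb_remove(int key)
{
	struct rec *cur, *prev = NULL;

	for (cur = tomb; cur; cur = cur->next) {
		if (cur->key == key)
			break;
		prev = cur;
	}
	if (!cur)
		return;
	if (prev)
		prev->next = cur->next;	/* unlink from the middle or end */
	else
		tomb = cur->next;	/* unlink the head */
	free(cur);
}

static void tomb_add(int key)
{
	struct rec *cur;

	for (cur = tomb; cur; cur = cur->next)
		if (cur->key == key)
			return;		/* already recorded */
	cur = malloc(sizeof(*cur));
	if (!cur)
		return;
	cur->key = key;
	cur->next = tomb;		/* prepend, as the kernel code does */
	tomb = cur;
}

int main(void)
{
	tomb_add(1);
	tomb_add(2);
	tomb_remove(1);
	printf("head key after remove: %d\n", tomb ? tomb->key : -1);
	return 0;
}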


@@ -1620,6 +1620,7 @@ static int key_notify_sa_flush(struct km_event *c)
 		return -ENOBUFS;
 	hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
 	hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
+	hdr->sadb_msg_type = SADB_FLUSH;
 	hdr->sadb_msg_seq = c->seq;
 	hdr->sadb_msg_pid = c->pid;
 	hdr->sadb_msg_version = PF_KEY_V2;
@@ -2385,6 +2386,7 @@ static int key_notify_policy_flush(struct km_event *c)
 	if (!skb_out)
 		return -ENOBUFS;
 	hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
+	hdr->sadb_msg_type = SADB_X_SPDFLUSH;
 	hdr->sadb_msg_seq = c->seq;
 	hdr->sadb_msg_pid = c->pid;
 	hdr->sadb_msg_version = PF_KEY_V2;


@@ -884,7 +884,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
 {
 	struct sctp_transport *transport = (struct sctp_transport *) arg;
-	if (asoc->overall_error_count > asoc->max_retrans) {
+	if (asoc->overall_error_count >= asoc->max_retrans) {
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -2122,7 +2122,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
 	struct sctp_bind_addr *bp;
 	int attempts = asoc->init_err_counter + 1;
-	if (attempts >= asoc->max_init_attempts) {
+	if (attempts > asoc->max_init_attempts) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_STALE_COOKIE));
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -4640,7 +4640,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
 	SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n");
-	if (attempts < asoc->max_init_attempts) {
+	if (attempts <= asoc->max_init_attempts) {
 		bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
 		repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
 		if (!repl)
@@ -4697,7 +4697,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
 	SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n");
-	if (attempts < asoc->max_init_attempts) {
+	if (attempts <= asoc->max_init_attempts) {
 		repl = sctp_make_cookie_echo(asoc, NULL);
 		if (!repl)
 			return SCTP_DISPOSITION_NOMEM;


@@ -5426,7 +5426,7 @@ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
 	return err;
 do_error:
-	if (asoc->init_err_counter + 1 >= asoc->max_init_attempts)
+	if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
 		err = -ETIMEDOUT;
 	else
 		err = -ECONNREFUSED;
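
The sm_statefuns.c and socket.c comparisons above are the same off-by-one adjustment: counting the attempt about to be made as init_err_counter + 1, retries continue while that value does not exceed the configured maximum, and the error path fires only once it does. A tiny sketch of that accounting, assuming every attempt times out; the variable names mirror init_err_counter/max_init_attempts but the loop is only an illustration, not SCTP code:

#include <stdio.h>

int main(void)
{
	int max_attempts = 3;
	int err_counter = 0;	/* failed attempts so far */
	int sent = 0;

	/* err_counter + 1 is the attempt about to be made; keep going while
	 * it does not exceed the maximum, so exactly 3 attempts are made. */
	while (err_counter + 1 <= max_attempts) {
		sent++;		/* transmit (and, here, always time out) */
		err_counter++;
	}
	printf("attempts made: %d (max_attempts = %d)\n", sent, max_attempts);
	return 0;
}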