Merge branch 'macb-coding-style'

Moritz Fischer says:

====================
macb: Coding style cleanups

Resending an almost unchanged v2 here:

Changes from v2:
* Rebased onto net-next
* Changed the 5th patch's commit message
* Added Nicholas' and Michal's Acked-by tags

Changes from v1:
* Backed out variable scope changes
* Separated out ether_addr_copy into its own commit
* Fixed typo in comments as suggested by Joe
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

@@ -61,8 +61,7 @@
#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
#define MACB_WOL_ENABLED (0x1 << 1)
/*
* Graceful stop timeouts in us. We should allow up to
/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
#define MACB_HALT_TIMEOUT 1230
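(Aside on the constant above: one maximum-size frame at 10 Mbit/s occupies 1538 bytes on the wire once the 8-byte preamble/SFD and 12-byte inter-frame gap are included, and 1538 * 8 bits / 10 Mbit/s = 1230.4 us, which is presumably where the 1230 us timeout comes from.)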
@@ -130,9 +129,8 @@ static void hw_writel(struct macb *bp, int offset, u32 value)
writel_relaxed(value, bp->regs + offset);
}
/*
* Find the CPU endianness by using the loopback bit of NCR register. When the
* CPU is in big endian we need to program swaped mode for management
/* Find the CPU endianness by using the loopback bit of NCR register. When the
* CPU is in big endian we need to program swapped mode for management
* descriptor access.
*/
static bool hw_is_native_io(void __iomem *addr)
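A minimal sketch of the probe the comment above describes, assuming the MACB_NCR register offset and the MACB_BIT(LLB) loopback bit from macb.h; the raw accessors never byte-swap, so the bit only reads back intact when CPU and device byte order agree:

static bool hw_is_native_io_sketch(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	/* raw accessors: no byte swapping in either direction */
	__raw_writel(value, addr + MACB_NCR);
	return __raw_readl(addr + MACB_NCR) == value;
}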
@@ -189,7 +187,7 @@ static void macb_get_hwaddr(struct macb *bp)
pdata = dev_get_platdata(&bp->pdev->dev);
/* Check all 4 address register for vaild address */
/* Check all 4 address register for valid address */
for (i = 0; i < 4; i++) {
bottom = macb_or_gem_readl(bp, SA1B + i * 8);
top = macb_or_gem_readl(bp, SA1T + i * 8);
@@ -297,7 +295,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
ferr = DIV_ROUND_UP(ferr, rate / 100000);
if (ferr > 5)
netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
rate);
rate);
if (clk_set_rate(clk, rate_rounded))
netdev_err(dev, "adjusting tx_clk failed.\n");
@@ -386,7 +384,8 @@ static int macb_mii_probe(struct net_device *dev)
pdata = dev_get_platdata(&bp->pdev->dev);
if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
"phy int");
if (!ret) {
phy_irq = gpio_to_irq(pdata->phy_irq_pin);
phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
@@ -430,7 +429,7 @@ static int macb_mii_init(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(MPE));
bp->mii_bus = mdiobus_alloc();
if (bp->mii_bus == NULL) {
if (!bp->mii_bus) {
err = -ENOMEM;
goto err_out;
}
@@ -439,7 +438,7 @@ static int macb_mii_init(struct macb *bp)
bp->mii_bus->read = &macb_mdio_read;
bp->mii_bus->write = &macb_mdio_write;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
bp->mii_bus->parent = &bp->dev->dev;
pdata = dev_get_platdata(&bp->pdev->dev);
@@ -452,7 +451,8 @@ static int macb_mii_init(struct macb *bp)
err = of_mdiobus_register(bp->mii_bus, np);
/* fallback to standard phy registration if no phy were
found during dt phy registration */
* found during dt phy registration
*/
if (!err && !phy_find_first(bp->mii_bus)) {
for (i = 0; i < PHY_MAX_ADDR; i++) {
struct phy_device *phydev;
@@ -499,7 +499,7 @@ static void macb_update_stats(struct macb *bp)
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
for(; p < end; p++, offset += 4)
for (; p < end; p++, offset += 4)
*p += bp->macb_reg_readl(bp, offset);
}
@@ -567,8 +567,7 @@ static void macb_tx_error_task(struct work_struct *work)
/* Make sure nobody is trying to queue up new packets */
netif_tx_stop_all_queues(bp->dev);
/*
* Stop transmission now
/* Stop transmission now
* (in case we have just queued new packets)
* macb/gem must be halted to write TBQP register
*/
@@ -576,8 +575,7 @@ static void macb_tx_error_task(struct work_struct *work)
/* Just complain for now, reinitializing TX path can be good */
netdev_err(bp->dev, "BUG: halt tx timed out\n");
/*
* Treat frames in TX queue including the ones that caused the error.
/* Treat frames in TX queue including the ones that caused the error.
* Free transmit buffers in upper layer.
*/
for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
@@ -607,10 +605,9 @@ static void macb_tx_error_task(struct work_struct *work)
bp->stats.tx_bytes += skb->len;
}
} else {
/*
* "Buffers exhausted mid-frame" errors may only happen
* if the driver is buggy, so complain loudly about those.
* Statistics are updated by hardware.
/* "Buffers exhausted mid-frame" errors may only happen
* if the driver is buggy, so complain loudly about
* those. Statistics are updated by hardware.
*/
if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
netdev_err(bp->dev,
@@ -662,7 +659,7 @@ static void macb_tx_interrupt(struct macb_queue *queue)
queue_writel(queue, ISR, MACB_BIT(TCOMP));
netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
(unsigned long)status);
(unsigned long)status);
head = queue->tx_head;
for (tail = queue->tx_tail; tail != head; tail++) {
@@ -722,7 +719,8 @@ static void gem_rx_refill(struct macb *bp)
struct sk_buff *skb;
dma_addr_t paddr;
while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
RX_RING_SIZE) > 0) {
entry = macb_rx_ring_wrap(bp->rx_prepared_head);
/* Make hw descriptor updates visible to CPU */
@@ -730,10 +728,10 @@ static void gem_rx_refill(struct macb *bp)
bp->rx_prepared_head++;
if (bp->rx_skbuff[entry] == NULL) {
if (!bp->rx_skbuff[entry]) {
/* allocate sk_buff for this free entry in ring */
skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
if (unlikely(skb == NULL)) {
if (unlikely(!skb)) {
netdev_err(bp->dev,
"Unable to allocate sk_buff\n");
break;
@@ -741,7 +739,8 @@ static void gem_rx_refill(struct macb *bp)
/* now fill corresponding descriptor entry */
paddr = dma_map_single(&bp->pdev->dev, skb->data,
bp->rx_buffer_size, DMA_FROM_DEVICE);
bp->rx_buffer_size,
DMA_FROM_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, paddr)) {
dev_kfree_skb(skb);
break;
@@ -766,7 +765,7 @@ static void gem_rx_refill(struct macb *bp)
wmb();
netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
bp->rx_prepared_head, bp->rx_tail);
bp->rx_prepared_head, bp->rx_tail);
}
/* Mark DMA descriptors from begin up to and not including end as unused */
@@ -777,14 +776,14 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
for (frag = begin; frag != end; frag++) {
struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
desc->addr &= ~MACB_BIT(RX_USED);
}
/* Make descriptor updates visible to hardware */
wmb();
/*
* When this happens, the hardware stats registers for
/* When this happens, the hardware stats registers for
* whatever caused this is updated, so we don't have to record
* anything.
*/
@@ -880,11 +879,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
macb_rx_ring_wrap(first_frag),
macb_rx_ring_wrap(last_frag), len);
macb_rx_ring_wrap(first_frag),
macb_rx_ring_wrap(last_frag), len);
/*
* The ethernet header starts NET_IP_ALIGN bytes into the
/* The ethernet header starts NET_IP_ALIGN bytes into the
* first buffer. Since the header is 14 bytes, this makes the
* payload word-aligned.
*
@@ -924,7 +922,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
macb_rx_buffer(bp, frag), frag_len);
macb_rx_buffer(bp, frag),
frag_len);
offset += bp->rx_buffer_size;
desc = macb_rx_desc(bp, frag);
desc->addr &= ~MACB_BIT(RX_USED);
@@ -942,7 +941,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
bp->stats.rx_packets++;
bp->stats.rx_bytes += skb->len;
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
skb->len, skb->csum);
skb->len, skb->csum);
netif_receive_skb(skb);
return 0;
@@ -1049,7 +1048,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
work_done = 0;
netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
(unsigned long)status, budget);
(unsigned long)status, budget);
work_done = bp->macbgem_ops.mog_rx(bp, budget);
if (work_done < budget) {
@@ -1099,8 +1098,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
(unsigned long)status);
if (status & MACB_RX_INT_FLAGS) {
/*
* There's no point taking any more interrupts
/* There's no point taking any more interrupts
* until we have processed the buffers. The
* scheduling call may fail if the poll routine
* is already scheduled, so disable interrupts
@@ -1129,8 +1127,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(TCOMP))
macb_tx_interrupt(queue);
/*
* Link change detection isn't possible with RMII, so we'll
/* Link change detection isn't possible with RMII, so we'll
* add that if/when we get our hands on a full-blown MII PHY.
*/
@@ -1161,8 +1158,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_BIT(HRESP)) {
/*
* TODO: Reset the hardware, and maybe move the
/* TODO: Reset the hardware, and maybe move the
* netdev_err to a lower-priority context as well
* (work queue?)
*/
@@ -1181,8 +1177,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling receive - used by netconsole and other diagnostic tools
/* Polling receive - used by netconsole and other diagnostic tools
* to allow network i/o with interrupts disabled.
*/
static void macb_poll_controller(struct net_device *dev)
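For reference, the netpoll pattern behind this helper looks like the following sketch, assuming the per-queue layout and the macb_interrupt() handler used elsewhere in this file: mask local interrupts, then invoke the interrupt handler for each queue directly so the stack can receive with interrupts disabled.

static void macb_poll_controller_sketch(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);	/* dev_id is the queue */
	local_irq_restore(flags);
}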
@@ -1268,7 +1263,7 @@ static unsigned int macb_tx_map(struct macb *bp,
}
/* Should never happen */
if (unlikely(tx_skb == NULL)) {
if (unlikely(!tx_skb)) {
netdev_err(bp->dev, "BUG! empty skb!\n");
return 0;
}
@@ -1338,16 +1333,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev,
"start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
queue_index, skb->len, skb->head, skb->data,
skb_tail_pointer(skb), skb_end_pointer(skb));
"start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
queue_index, skb->len, skb->head, skb->data,
skb_tail_pointer(skb), skb_end_pointer(skb));
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, 16, true);
#endif
/* Count how many TX buffer descriptors are needed to send this
* socket buffer: skb fragments of jumbo frames may need to be
* splitted into many buffer descriptors.
* split into many buffer descriptors.
*/
count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -1398,8 +1393,8 @@ static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
netdev_dbg(bp->dev,
"RX buffer must be multiple of %d bytes, expanding\n",
RX_BUFFER_MULTIPLE);
"RX buffer must be multiple of %d bytes, expanding\n",
RX_BUFFER_MULTIPLE);
bp->rx_buffer_size =
roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
}
@@ -1422,7 +1417,7 @@ static void gem_free_rx_buffers(struct macb *bp)
for (i = 0; i < RX_RING_SIZE; i++) {
skb = bp->rx_skbuff[i];
if (skb == NULL)
if (!skb)
continue;
desc = &bp->rx_ring[i];
@@ -1478,10 +1473,10 @@ static int gem_alloc_rx_buffers(struct macb *bp)
bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
if (!bp->rx_skbuff)
return -ENOMEM;
else
netdev_dbg(bp->dev,
"Allocated %d RX struct sk_buff entries at %p\n",
RX_RING_SIZE, bp->rx_skbuff);
netdev_dbg(bp->dev,
"Allocated %d RX struct sk_buff entries at %p\n",
RX_RING_SIZE, bp->rx_skbuff);
return 0;
}
@@ -1494,10 +1489,10 @@ static int macb_alloc_rx_buffers(struct macb *bp)
&bp->rx_buffers_dma, GFP_KERNEL);
if (!bp->rx_buffers)
return -ENOMEM;
else
netdev_dbg(bp->dev,
"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
netdev_dbg(bp->dev,
"Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
return 0;
}
@@ -1588,8 +1583,7 @@ static void macb_reset_hw(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
/*
* Disable RX and TX (XXX: Should we halt the transmission
/* Disable RX and TX (XXX: Should we halt the transmission
* more gracefully?)
*/
macb_writel(bp, NCR, 0);
@@ -1652,8 +1646,7 @@ static u32 macb_mdc_clk_div(struct macb *bp)
return config;
}
/*
* Get the DMA bus width field of the network configuration register that we
/* Get the DMA bus width field of the network configuration register that we
* should program. We find the width from decoding the design configuration
* register to find the maximum supported data bus width.
*/
@@ -1673,8 +1666,7 @@ static u32 macb_dbw(struct macb *bp)
}
}
/*
* Configure the receive DMA engine
/* Configure the receive DMA engine
* - use the correct receive buffer size
* - set best burst length for DMA operations
* (if not supported by FIFO, it will fallback to default)
@@ -1762,8 +1754,7 @@ static void macb_init_hw(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
}
/*
* The hash address register is 64 bits long and takes up two
/* The hash address register is 64 bits long and takes up two
* locations in the memory map. The least significant bits are stored
* in EMAC_HSL and the most significant bits in EMAC_HSH.
*
@@ -1803,9 +1794,7 @@ static inline int hash_bit_value(int bitnr, __u8 *addr)
return 0;
}
/*
* Return the hash index value for the specified address.
*/
/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
int i, j, bitval;
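The computation this function performs folds the 48-bit MAC address into a 6-bit table index: bit j of the index is the XOR of address bits j, j+6, j+12, ..., j+42. A standalone sketch of that scheme (get_bit() is a hypothetical helper shown only for illustration; the driver's hash_bit_value() reads bits LSB-first the same way):

#include <stdint.h>

static int get_bit(const uint8_t *addr, int bitnr)
{
	return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
}

static int hash_index_sketch(const uint8_t addr[6])
{
	int index = 0;

	for (int j = 0; j < 6; j++) {
		int bitval = 0;

		/* XOR every sixth bit of the address into index bit j */
		for (int i = 0; i < 8; i++)
			bitval ^= get_bit(addr, i * 6 + j);
		index |= bitval << j;
	}
	return index;
}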
@@ -1821,9 +1810,7 @@ static int hash_get_index(__u8 *addr)
return hash_index;
}
/*
* Add multicast addresses to the internal multicast-hash table.
*/
/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -1831,7 +1818,8 @@ static void macb_sethashtable(struct net_device *dev)
unsigned int bitnr;
struct macb *bp = netdev_priv(dev);
mc_filter[0] = mc_filter[1] = 0;
mc_filter[0] = 0;
mc_filter[1] = 0;
netdev_for_each_mc_addr(ha, dev) {
bitnr = hash_get_index(ha->addr);
@@ -1842,9 +1830,7 @@ static void macb_sethashtable(struct net_device *dev)
macb_or_gem_writel(bp, HRT, mc_filter[1]);
}
/*
* Enable/Disable promiscuous and multicast modes.
*/
/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
unsigned long cfg;
@@ -2161,9 +2147,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
regs_buff[12] = macb_or_gem_readl(bp, USRIO);
if (macb_is_gem(bp)) {
if (macb_is_gem(bp))
regs_buff[13] = gem_readl(bp, DMACFG);
}
}
static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2286,11 +2271,11 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_set_features = macb_set_features,
};
/*
* Configure peripheral capabilities according to device tree
/* Configure peripheral capabilities according to device tree
* and integration options used
*/
static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
static void macb_configure_caps(struct macb *bp,
const struct macb_config *dt_conf)
{
u32 dcfg;
@@ -2988,7 +2973,7 @@ static int macb_probe(struct platform_device *pdev)
mac = of_get_mac_address(np);
if (mac)
memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
ether_addr_copy(bp->dev->dev_addr, mac);
else
macb_get_hwaddr(bp);
@@ -2996,6 +2981,7 @@ static int macb_probe(struct platform_device *pdev)
phy_node = of_get_next_available_child(np, NULL);
if (phy_node) {
int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
if (gpio_is_valid(gpio)) {
bp->reset_gpio = gpio_to_desc(gpio);
gpiod_direction_output(bp->reset_gpio, 1);