bitops: rename for_each_bit() to for_each_set_bit()
Rename for_each_bit() to for_each_set_bit() throughout the kernel source tree, to permit a for_each_clear_bit() counterpart should one ever be added. The patch also includes a macro that maps the old for_each_bit() onto the new for_each_set_bit(); this is a (very) temporary measure to ease the migration.

[akpm@linux-foundation.org: add temporary for_each_bit()]
Suggested-by: Alexey Dobriyan <adobriyan@gmail.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Artem Bityutskiy <dedekind@infradead.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e3cb91ce1a
commit 984b3f5746
18 changed files with 26 additions and 24 deletions
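The semantics of the iterator are unchanged by the rename: it walks the indices of the set bits in a bitmap, starting from bit 0. A minimal sketch of a typical call site (not taken from this patch; the bitmap, bound, and function name are made up for illustration):

#include <linux/kernel.h>
#include <linux/bitops.h>

/* Illustrative only: act on every set bit in a small status word. */
static void handle_active_channels(unsigned long status, unsigned int nr_channels)
{
        unsigned int bit;

        /* Visits only the set bits of 'status', lowest index first. */
        for_each_set_bit(bit, &status, nr_channels)
                pr_debug("channel %u is active\n", bit);
}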
@@ -676,7 +676,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                 if (c->weight != w)
                         continue;
 
-                for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
+                for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
                         if (!test_bit(j, used_mask))
                                 break;
                 }
@@ -757,7 +757,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
         inc_irq_stat(apic_perf_irqs);
         ack = status;
-        for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
+        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                 struct perf_event *event = cpuc->events[bit];
 
                 clear_bit(bit, (unsigned long *) &status);
@@ -71,7 +71,7 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
         }
 
         attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
-        for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
+        for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
                 chan = ioat_chan_by_index(instance, bit);
                 tasklet_schedule(&chan->cleanup_task);
         }
@@ -219,7 +219,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
                 if (pending == 0)
                         continue;
 
-                for_each_bit(offset, &pending, PL061_GPIO_NR)
+                for_each_set_bit(offset, &pending, PL061_GPIO_NR)
                         generic_handle_irq(pl061_to_irq(&chip->gc, offset));
         }
         desc->chip->unmask(irq);
@@ -175,7 +175,7 @@ static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
         ipr = ioread32(tgpio->membase + TGPIO_IPR);
         iowrite32(ipr, tgpio->membase + TGPIO_ICR);
 
-        for_each_bit(offset, &ipr, tgpio->gpio.ngpio)
+        for_each_set_bit(offset, &ipr, tgpio->gpio.ngpio)
                 generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
 }
 
@@ -497,13 +497,13 @@ static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
         int i;
 
         if (abort_source & DW_IC_TX_ABRT_NOACK) {
-                for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+                for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
                         dev_dbg(dev->dev,
                                 "%s: %s\n", __func__, abort_sources[i]);
                 return -EREMOTEIO;
         }
 
-        for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+        for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
                 dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
 
         if (abort_source & DW_IC_TX_ARB_LOST)
@@ -108,7 +108,7 @@ static void egpio_handler(unsigned int irq, struct irq_desc *desc)
         ack_irqs(ei);
         /* Process all set pins. */
         readval &= ei->irqs_enabled;
-        for_each_bit(irqpin, &readval, ei->nirqs) {
+        for_each_set_bit(irqpin, &readval, ei->nirqs) {
                 /* Run irq handler */
                 pr_debug("got IRQ %d\n", irqpin);
                 irq = ei->irq_start + irqpin;
@@ -475,7 +475,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
         if (skb->data[0] == 0xff) {
                 /* we are being asked to broadcast to all partitions */
-                for_each_bit(dest_partid, xpnet_broadcast_partitions,
+                for_each_set_bit(dest_partid, xpnet_broadcast_partitions,
                              xp_max_npartitions) {
 
                         xpnet_send(skb, queued_msg, start_addr, end_addr,
@@ -998,7 +998,7 @@ static int gfar_probe(struct of_device *ofdev,
         }
 
         /* Need to reverse the bit maps as bit_map's MSB is q0
-         * but, for_each_bit parses from right to left, which
+         * but, for_each_set_bit parses from right to left, which
          * basically reverses the queue numbers */
         for (i = 0; i< priv->num_grps; i++) {
                 priv->gfargrp[i].tx_bit_map = reverse_bitmap(
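The comment in the hunk above hides a small subtlety: the hardware register encodes queue 0 in the MSB of the bitmap, while for_each_set_bit() (like for_each_bit() before it) walks from bit 0 upwards, so iterating the raw register value would visit the queues in reverse order. The driver therefore mirrors the bitmap first. A rough sketch of such a helper (gianfar's own routine is named reverse_bitmap(); the version below is only an illustration of the idea, not the driver's exact code):

#include <linux/types.h>

/* Mirror the low 'max_qs' bits of 'q' so that an MSB-first hardware
 * bitmap can be walked LSB-first by for_each_set_bit(). */
static u32 mirror_queue_bitmap(u32 q, u32 max_qs)
{
        u32 new = 0, i;

        for (i = 0; i < max_qs; i++, q >>= 1)
                if (q & 0x1)
                        new |= (1 << (max_qs - 1 - i));

        return new;
}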
@@ -1011,7 +1011,7 @@ static int gfar_probe(struct of_device *ofdev,
          * also assign queues to groups */
         for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
                 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
-                for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+                for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
                                 priv->num_rx_queues) {
                         priv->gfargrp[grp_idx].num_rx_queues++;
                         priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1019,7 +1019,7 @@ static int gfar_probe(struct of_device *ofdev,
                         rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
                 }
                 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
-                for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
+                for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
                                 priv->num_tx_queues) {
                         priv->gfargrp[grp_idx].num_tx_queues++;
                         priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1709,7 +1709,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
 
         if (priv->mode == MQ_MG_MODE) {
                 baddr = &regs->txic0;
-                for_each_bit (i, &tx_mask, priv->num_tx_queues) {
+                for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
                         if (likely(priv->tx_queue[i]->txcoalescing)) {
                                 gfar_write(baddr + i, 0);
                                 gfar_write(baddr + i, priv->tx_queue[i]->txic);
@@ -1717,7 +1717,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
                 }
 
                 baddr = &regs->rxic0;
-                for_each_bit (i, &rx_mask, priv->num_rx_queues) {
+                for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
                         if (likely(priv->rx_queue[i]->rxcoalescing)) {
                                 gfar_write(baddr + i, 0);
                                 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
@@ -2607,7 +2607,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
                         budget_per_queue = left_over_budget/num_queues;
                         left_over_budget = 0;
 
-                        for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+                        for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
                                 if (test_bit(i, &serviced_queues))
                                         continue;
                                 rx_queue = priv->rx_queue[i];
@@ -1050,7 +1050,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
          */
         for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                 q_vector = adapter->q_vector[v_idx];
-                /* XXX for_each_bit(...) */
+                /* XXX for_each_set_bit(...) */
                 r_idx = find_first_bit(q_vector->rxr_idx,
                                        adapter->num_rx_queues);
 
@@ -751,7 +751,7 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
          */
         for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                 q_vector = adapter->q_vector[v_idx];
-                /* XXX for_each_bit(...) */
+                /* XXX for_each_set_bit(...) */
                 r_idx = find_first_bit(q_vector->rxr_idx,
                                        adapter->num_rx_queues);
 
@@ -394,7 +394,7 @@ static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
                 ieee80211_tx_status_irqsafe(ar->hw, skb);
         }
 
-        for_each_bit(i, &queue_bitmap, BITS_PER_BYTE) {
+        for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
 #ifdef AR9170_QUEUE_STOP_DEBUG
                 printk(KERN_DEBUG "%s: wake queue %d\n",
                        wiphy_name(ar->hw->wiphy), i);
@@ -89,7 +89,7 @@ static int iwm_debugfs_dbg_modules_write(void *data, u64 val)
         for (i = 0; i < __IWM_DM_NR; i++)
                 iwm->dbg.dbg_module[i] = 0;
 
-        for_each_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
+        for_each_set_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
                 iwm->dbg.dbg_module[bit] = iwm->dbg.dbg_level;
 
         return 0;
@@ -1116,7 +1116,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
                 return -EINVAL;
         }
 
-        for_each_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
+        for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
                 tid_info = &sta_info->tid_info[bit];
 
                 mutex_lock(&tid_info->mutex);
@@ -457,7 +457,7 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
                         break;
                 }
                 dchunk = (struct ocfs2_local_disk_chunk *)hbh->b_data;
-                for_each_bit(bit, rchunk->rc_bitmap, ol_chunk_entries(sb)) {
+                for_each_set_bit(bit, rchunk->rc_bitmap, ol_chunk_entries(sb)) {
                         qbh = NULL;
                         status = ocfs2_read_quota_block(lqinode,
                                         ol_dqblk_block(sb, chunk, bit),
@@ -16,11 +16,13 @@
  */
 #include <asm/bitops.h>
 
-#define for_each_bit(bit, addr, size) \
+#define for_each_set_bit(bit, addr, size) \
         for ((bit) = find_first_bit((addr), (size)); \
              (bit) < (size); \
              (bit) = find_next_bit((addr), (size), (bit) + 1))
 
+/* Temporary */
+#define for_each_bit(bit, addr, size) for_each_set_bit(bit, addr, size)
+
 static __inline__ int get_bitmask_order(unsigned int count)
 {
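The bitops.h hunk above is the heart of the change: for_each_set_bit() is built on find_first_bit()/find_next_bit(), and the old name survives only as a thin alias until every caller has been converted. The for_each_clear_bit() mentioned in the changelog does not exist at this point; if it were ever added along the same lines, it would presumably be built on find_first_zero_bit()/find_next_zero_bit(), roughly as follows (purely a hypothetical sketch, not part of this patch):

/* Hypothetical counterpart: iterate over the clear bits of a bitmap. */
#define for_each_clear_bit(bit, addr, size) \
        for ((bit) = find_first_zero_bit((addr), (size)); \
             (bit) < (size); \
             (bit) = find_next_zero_bit((addr), (size), (bit) + 1))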
@@ -47,7 +47,7 @@ static int convert_prio(int prio)
 }
 
 #define for_each_cpupri_active(array, idx) \
-        for_each_bit(idx, array, CPUPRI_NR_PRIORITIES)
+        for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES)
 
 /**
  * cpupri_find - find the best (lowest-pri) CPU in the system
@@ -137,7 +137,7 @@ static void uda1380_flush_work(struct work_struct *work)
 {
         int bit, reg;
 
-        for_each_bit(bit, &uda1380_cache_dirty, UDA1380_CACHEREGNUM - 0x10) {
+        for_each_set_bit(bit, &uda1380_cache_dirty, UDA1380_CACHEREGNUM - 0x10) {
                 reg = 0x10 + bit;
                 pr_debug("uda1380: flush reg %x val %x:\n", reg,
                         uda1380_read_reg_cache(uda1380_codec, reg));