iwlwifi: introduce per-queue locks
Instead of (ab)using the sta_lock, make the transport layer
lock its own TX queue data structures with a lock per queue.
This also unifies with the cmd queue lock.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 9451ca1a31
commit 015c15e106
4 changed files with 33 additions and 22 deletions
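The locking pattern introduced by this patch is worth seeing in isolation: each TX queue embeds its own spinlock, the lock is initialized when the queue is set up, hot paths lock only their own queue, and internal helpers assert the lock rather than take it. A minimal, self-contained sketch follows; the toy_txq names are hypothetical stand-ins, not the driver's real types:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical, pared-down queue: the point is only that the
 * lock lives inside the per-queue structure instead of being
 * shared driver-wide. */
struct toy_txq {
        spinlock_t lock;        /* protects read_ptr/write_ptr below */
        int read_ptr;
        int write_ptr;
};

static void toy_txq_init(struct toy_txq *q)
{
        spin_lock_init(&q->lock);
        q->read_ptr = q->write_ptr = 0;
}

/* Enqueue path, callable from process context: the _bh variants
 * suffice because no hard-IRQ handler ever takes this lock. */
static void toy_txq_enqueue(struct toy_txq *q)
{
        spin_lock_bh(&q->lock);
        q->write_ptr++;
        spin_unlock_bh(&q->lock);
}

/* Internal helper that requires the caller to hold the lock;
 * lockdep_assert_held() documents and (with lockdep enabled)
 * enforces that contract. */
static void toy_txq_free_entry(struct toy_txq *q)
{
        lockdep_assert_held(&q->lock);
        q->read_ptr++;
}

With one lock per queue, contention on a busy data queue no longer stalls the command queue, and vice versa.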
drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h

@@ -169,6 +169,7 @@ struct iwl_queue {
  * @meta: array of meta data for each command/tx buffer
  * @dma_addr_cmd: physical address of cmd/tx buffer array
  * @txb: array of per-TFD driver data
+ * lock: queue lock
  * @time_stamp: time (in jiffies) of last read_ptr change
  * @need_update: indicates need to update read/write index
  * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
@@ -187,6 +188,7 @@ struct iwl_tx_queue {
 	struct iwl_device_cmd **cmd;
 	struct iwl_cmd_meta *meta;
 	struct sk_buff **skbs;
+	spinlock_t lock;
 	unsigned long time_stamp;
 	u8 need_update;
 	u8 sched_retry;
drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c

@@ -217,6 +217,8 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 {
 	struct iwl_tfd *tfd_tmp = txq->tfds;
 
+	lockdep_assert_held(&txq->lock);
+
 	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);
 
 	/* free SKB */
@@ -621,7 +623,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
 	dma_addr_t phys_addr;
-	unsigned long flags;
 	u32 idx;
 	u16 copy_size, cmd_size;
 	bool is_ct_kill = false;
@@ -680,10 +681,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 		return -EIO;
 	}
 
-	spin_lock_irqsave(&trans->hcmd_lock, flags);
+	spin_lock_bh(&txq->lock);
 
 	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-		spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+		spin_unlock_bh(&txq->lock);
 
 		IWL_ERR(trans, "No space in command queue\n");
 		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
@@ -790,7 +791,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	iwl_txq_update_write_ptr(trans, txq);
 
  out:
-	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+	spin_unlock_bh(&txq->lock);
 	return idx;
 }
 
@@ -809,6 +810,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
 	struct iwl_queue *q = &txq->q;
 	int nfreed = 0;
 
+	lockdep_assert_held(&txq->lock);
+
 	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
 		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
 			"index %d is out of range [0-%d] %d %d.\n", __func__,
@@ -850,7 +853,6 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
 	struct iwl_cmd_meta *meta;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
-	unsigned long flags;
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
@@ -864,6 +866,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
 		return;
 	}
 
+	spin_lock(&txq->lock);
+
 	cmd_index = get_cmd_index(&txq->q, index);
 	cmd = txq->cmd[cmd_index];
 	meta = &txq->meta[cmd_index];
@@ -880,8 +884,6 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
 		rxb->page = NULL;
 	}
 
-	spin_lock_irqsave(&trans->hcmd_lock, flags);
-
 	iwl_hcmd_queue_reclaim(trans, txq_id, index);
 
 	if (!(meta->flags & CMD_ASYNC)) {
@@ -898,7 +900,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
 
 	meta->flags = 0;
 
-	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
+	spin_unlock(&txq->lock);
 }
 
 #define HOST_COMPLETE_TIMEOUT (2 * HZ)
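The two functions above also show a change in lock flavor: iwl_enqueue_hcmd() now uses spin_lock_bh()/spin_unlock_bh(), while iwl_tx_cmd_complete() uses plain spin_lock()/spin_unlock(). That pairing is consistent if command completion only ever runs in softirq (tasklet) context; continuing the hypothetical toy_txq sketch under that assumption:

/* Continuing the toy_txq sketch above, assuming completion runs
 * only in softirq context (e.g. from an RX tasklet). */

/* Process-context enqueue: disabling bottom halves keeps the
 * softirq completion path off this CPU while we hold the lock,
 * preventing a self-deadlock. */
static void toy_enqueue(struct toy_txq *q)
{
        spin_lock_bh(&q->lock);
        q->write_ptr++;
        spin_unlock_bh(&q->lock);
}

/* Softirq-context completion: bottom halves are already disabled
 * here, so the plain lock/unlock variants are sufficient. */
static void toy_complete(struct toy_txq *q)
{
        spin_lock(&q->lock);
        q->read_ptr++;
        spin_unlock(&q->lock);
}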
@@ -1041,6 +1043,8 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
 	if (WARN_ON(txq_id == trans->shrd->cmd_queue))
 		return 0;
 
+	lockdep_assert_held(&txq->lock);
+
 	/*Since we free until index _not_ inclusive, the one before index is
 	 * the last we will free. This one must be used */
 	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
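Note that the reclaim helpers (iwl_hcmd_queue_reclaim() and iwl_tx_queue_reclaim() above) do not take the queue lock themselves: their callers hold it, and lockdep_assert_held() enforces that contract at runtime when lockdep is enabled. Continuing the hypothetical toy_txq sketch, the caller/callee split looks like this:

/* Caller/callee split from the toy_txq sketch: the outer function
 * owns the locking, the helper only asserts the precondition. */
static void toy_reclaim_locked(struct toy_txq *q, int upto)
{
        lockdep_assert_held(&q->lock);  /* caller must hold q->lock */
        while (q->read_ptr != upto)
                q->read_ptr++;
}

static void toy_reclaim(struct toy_txq *q, int upto)
{
        spin_lock(&q->lock);
        toy_reclaim_locked(q, upto);
        spin_unlock(&q->lock);
}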
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c

@@ -390,6 +390,8 @@ static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 	if (ret)
 		return ret;
 
+	spin_lock_init(&txq->lock);
+
 	/*
 	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 	 * given Tx queue, and enable the DMA channel used for that queue.
@@ -409,8 +411,6 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
 	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	enum dma_data_direction dma_dir;
-	unsigned long flags;
-	spinlock_t *lock;
 
 	if (!q->n_bd)
 		return;
@@ -418,22 +418,19 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
 	/* In the command queue, all the TBs are mapped as BIDI
 	 * so unmap them as such.
 	 */
-	if (txq_id == trans->shrd->cmd_queue) {
+	if (txq_id == trans->shrd->cmd_queue)
 		dma_dir = DMA_BIDIRECTIONAL;
-		lock = &trans->hcmd_lock;
-	} else {
+	else
 		dma_dir = DMA_TO_DEVICE;
-		lock = &trans->shrd->sta_lock;
-	}
 
-	spin_lock_irqsave(lock, flags);
+	spin_lock_bh(&txq->lock);
 	while (q->write_ptr != q->read_ptr) {
 		/* The read_ptr needs to bound by q->n_window */
 		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
 				    dma_dir);
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
-	spin_unlock_irqrestore(lock, flags);
+	spin_unlock_bh(&txq->lock);
 }
 
 /**
@@ -1358,6 +1355,8 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	txq = &trans_pcie->txq[txq_id];
 	q = &txq->q;
 
+	spin_lock(&txq->lock);
+
 	/* In AGG mode, the index in the ring must correspond to the WiFi
 	 * sequence number. This is a HW requirements to help the SCD to parse
 	 * the BA.
@@ -1404,7 +1403,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 					   &dev_cmd->hdr, firstlen,
 					   DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
-		return -1;
+		goto out_err;
 	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
 	dma_unmap_len_set(out_meta, len, firstlen);
 
@@ -1426,7 +1425,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 				 dma_unmap_addr(out_meta, mapping),
 				 dma_unmap_len(out_meta, len),
 				 DMA_BIDIRECTIONAL);
-			return -1;
+			goto out_err;
 		}
 	}
 
@@ -1481,7 +1480,11 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			iwl_stop_queue(trans, txq, "Queue is full");
 		}
 	}
+	spin_unlock(&txq->lock);
 	return 0;
+out_err:
+	spin_unlock(&txq->lock);
+	return -1;
 }
 
 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
@@ -1560,6 +1563,8 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
 	int tfd_num = ssn & (txq->q.n_bd - 1);
 	int freed = 0;
 
+	spin_lock(&txq->lock);
+
 	txq->time_stamp = jiffies;
 
 	if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
@@ -1574,6 +1579,7 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
 		IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
 			"agg_txq[sta_id[tid] %d", txq_id,
 			trans_pcie->agg_txq[sta_id][tid]);
+		spin_unlock(&txq->lock);
 		return 1;
 	}
 
@@ -1587,6 +1593,8 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
 		     status != TX_STATUS_FAIL_PASSIVE_NO_RX))
 			iwl_wake_queue(trans, txq, "Packets reclaimed");
 	}
 
+	spin_unlock(&txq->lock);
+
 	return 0;
 }
 
@@ -2267,7 +2275,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
 	trans->ops = &trans_ops_pcie;
 	trans->shrd = shrd;
 	trans_pcie->trans = trans;
-	spin_lock_init(&trans->hcmd_lock);
 	spin_lock_init(&trans_pcie->irq_lock);
 
 	/* W/A - seems to solve weird behavior. We need to remove this if we
drivers/net/wireless/iwlwifi/iwl-trans.h

@@ -309,7 +309,6 @@ enum iwl_trans_state {
  * @ops - pointer to iwl_trans_ops
  * @op_mode - pointer to the op_mode
  * @shrd - pointer to iwl_shared which holds shared data from the upper layer
- * @hcmd_lock: protects HCMD
 * @reg_lock - protect hw register access
 * @dev - pointer to struct device * that represents the device
 * @irq - the irq number for the device
@@ -326,7 +325,6 @@ struct iwl_trans {
 	struct iwl_op_mode *op_mode;
 	struct iwl_shared *shrd;
 	enum iwl_trans_state state;
-	spinlock_t hcmd_lock;
 	spinlock_t reg_lock;
 
 	struct device *dev;