iwlwifi: tid_data logic move to upper layer - tx AGG stop
The tid_data is not related to the transport layer, so move the logic
that depends on it to the upper layer. This patch deals with tx AGG
stop.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
parent 1f40e145eb
commit bc23773059

5 changed files with 90 additions and 102 deletions
@@ -395,6 +395,77 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	return -1;
 }
 
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta, u16 tid)
+{
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_tid_data *tid_data;
+	unsigned long flags;
+	int sta_id;
+
+	sta_id = iwl_sta_id(sta);
+
+	if (sta_id == IWL_INVALID_STATION) {
+		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+		return -ENXIO;
+	}
+
+	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+	tid_data = &priv->shrd->tid_data[sta_id][tid];
+
+	switch (priv->shrd->tid_data[sta_id][tid].agg.state) {
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/*
+		 * This can happen if the peer stops aggregation
+		 * again before we've had a chance to drain the
+		 * queue we selected previously, i.e. before the
+		 * session was really started completely.
+		 */
+		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+		goto turn_off;
+	case IWL_AGG_ON:
+		break;
+	default:
+		IWL_WARN(priv, "Stopping AGG while state not ON "
+			 "or starting for %d on %d (%d)\n", sta_id, tid,
+			 priv->shrd->tid_data[sta_id][tid].agg.state);
+		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+		return 0;
+	}
+
+	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+
+	/* There are still packets for this RA / TID in the HW */
+	if (tid_data->agg.ssn != tid_data->next_reclaimed) {
+		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
+				    "next_recl = %d",
+				    tid_data->agg.ssn,
+				    tid_data->next_reclaimed);
+		priv->shrd->tid_data[sta_id][tid].agg.state =
+			IWL_EMPTYING_HW_QUEUE_DELBA;
+		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+		return 0;
+	}
+
+	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
+			    tid_data->agg.ssn);
+turn_off:
+	priv->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+
+	/* do not restore/save irqs */
+	spin_unlock(&priv->shrd->sta_lock);
+	spin_lock(&priv->shrd->lock);
+
+	iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+
+	spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
+	iwl_stop_tx_ba_trans_ready(priv, vif_priv->ctx->ctxid, sta_id, tid);
+
+	return 0;
+}
+
 int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 {
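The hunk above hands the whole AGG-stop state machine to the upper layer: stop immediately if the session never fully started (ADDBA queue still emptying), park in IWL_EMPTYING_HW_QUEUE_DELBA while frames up to ssn are still unreclaimed, and only once drained tell the transport to tear the queue down. To make that decision logic easy to follow, here is a minimal user-space model of it; the enum and field names mirror the diff, but everything else (plain C, no locking, no hardware) is a simplified illustration, not driver code:

	#include <stdio.h>

	enum agg_state {
		IWL_AGG_OFF,
		IWL_AGG_ON,
		IWL_EMPTYING_HW_QUEUE_ADDBA,
		IWL_EMPTYING_HW_QUEUE_DELBA,
	};

	struct tid_data {
		enum agg_state state;
		unsigned int ssn;		/* seq number the session stops at */
		unsigned int next_reclaimed;	/* next frame the HW will reclaim */
	};

	/* Model of the upper layer's stop decision: the transport is only
	 * told to tear the queue down once nothing is left in flight. */
	static void agg_stop(struct tid_data *td)
	{
		switch (td->state) {
		case IWL_EMPTYING_HW_QUEUE_ADDBA:
			/* peer stopped before the session fully started */
			td->state = IWL_AGG_OFF;
			return;
		case IWL_AGG_ON:
			break;
		default:
			return;			/* nothing to stop */
		}

		if (td->ssn != td->next_reclaimed) {
			/* frames still in the HW: wait for the drain */
			td->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			return;
		}
		td->state = IWL_AGG_OFF;	/* drained: stop now */
	}

	int main(void)
	{
		struct tid_data busy = { .state = IWL_AGG_ON,
					 .ssn = 10, .next_reclaimed = 7 };
		struct tid_data drained = { .state = IWL_AGG_ON,
					    .ssn = 10, .next_reclaimed = 10 };

		agg_stop(&busy);	/* 3 frames outstanding */
		agg_stop(&drained);	/* nothing left to reclaim */

		printf("busy: %d (expect DELBA=%d), drained: %d (expect OFF=%d)\n",
		       busy.state, IWL_EMPTYING_HW_QUEUE_DELBA,
		       drained.state, IWL_AGG_OFF);
		return 0;
	}

In the real driver the busy TID stays in IWL_EMPTYING_HW_QUEUE_DELBA until the reclaim path catches up, which is exactly what the iwlagn_txq_check_empty hunk further down handles.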
@@ -428,23 +499,6 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	return ret;
 }
 
-int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta, u16 tid)
-{
-	int sta_id;
-	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
-	sta_id = iwl_sta_id(sta);
-
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
-		return -ENXIO;
-	}
-
-	return iwl_trans_tx_agg_disable(trans(priv), vif_priv->ctx->ctxid,
-					sta_id, tid);
-}
-
 static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
 				     struct iwl_rxon_context *ctx,
 				     const u8 *addr1)
@@ -280,10 +280,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans,
 void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 				       struct iwl_tx_queue *txq,
 				       u16 byte_cnt);
-void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
 int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
-				  enum iwl_rxon_context_id ctx, int sta_id,
-				  int tid);
+				  int sta_id, int tid);
 void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
 void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 				   struct iwl_tx_queue *txq,
@@ -578,9 +578,23 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
 	return 0;
 }
 
-void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	/* TODO: the transport layer shouldn't access the tid_data */
+	int txq_id = trans->shrd->tid_data[sta_id][tid].agg.txq_id;
+
+	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
+	    (IWLAGN_FIRST_AMPDU_QUEUE +
+		hw_params(trans).num_ampdu_queues <= txq_id)) {
+		IWL_ERR(trans,
+			"queue number out of range: %d, must be %d to %d\n",
+			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+			IWLAGN_FIRST_AMPDU_QUEUE +
+			hw_params(trans).num_ampdu_queues - 1);
+		return -EINVAL;
+	}
+
 	iwlagn_tx_queue_stop_scheduler(trans, txq_id);
 
 	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
@@ -593,82 +607,6 @@ void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
 	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
 	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
 	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
-}
-
-int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
-				  enum iwl_rxon_context_id ctx, int sta_id,
-				  int tid)
-{
-	struct iwl_tid_data *tid_data;
-	unsigned long flags;
-	int txq_id;
-
-	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
-
-	tid_data = &trans->shrd->tid_data[sta_id][tid];
-	txq_id = tid_data->agg.txq_id;
-
-	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWLAGN_FIRST_AMPDU_QUEUE +
-		hw_params(trans).num_ampdu_queues <= txq_id)) {
-		IWL_ERR(trans,
-			"queue number out of range: %d, must be %d to %d\n",
-			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
-			IWLAGN_FIRST_AMPDU_QUEUE +
-			hw_params(trans).num_ampdu_queues - 1);
-		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
-		return -EINVAL;
-	}
-
-	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
-	case IWL_EMPTYING_HW_QUEUE_ADDBA:
-		/*
-		 * This can happen if the peer stops aggregation
-		 * again before we've had a chance to drain the
-		 * queue we selected previously, i.e. before the
-		 * session was really started completely.
-		 */
-		IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
-		goto turn_off;
-	case IWL_AGG_ON:
-		break;
-	default:
-		IWL_WARN(trans, "Stopping AGG while state not ON "
-			 "or starting for %d on %d (%d)\n", sta_id, tid,
-			 trans->shrd->tid_data[sta_id][tid].agg.state);
-		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
-		return 0;
-	}
-
-	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
-
-	/* There are still packets for this RA / TID in the HW */
-	if (tid_data->agg.ssn != tid_data->next_reclaimed) {
-		IWL_DEBUG_TX_QUEUES(trans, "Can't proceed: ssn %d, "
-				    "next_recl = %d",
-				    tid_data->agg.ssn,
-				    tid_data->next_reclaimed);
-		trans->shrd->tid_data[sta_id][tid].agg.state =
-			IWL_EMPTYING_HW_QUEUE_DELBA;
-		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
-		return 0;
-	}
-
-	IWL_DEBUG_TX_QUEUES(trans, "Can proceed: ssn = next_recl = %d",
-			    tid_data->agg.ssn);
-turn_off:
-	trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
-
-	/* do not restore/save irqs */
-	spin_unlock(&trans->shrd->sta_lock);
-	spin_lock(&trans->shrd->lock);
-
-	iwl_trans_pcie_txq_agg_disable(trans, txq_id);
-
-	spin_unlock_irqrestore(&trans->shrd->lock, flags);
-
-	iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
-
 	return 0;
 }
 
@@ -1290,7 +1290,7 @@ static int iwlagn_txq_check_empty(struct iwl_trans *trans,
 		IWL_DEBUG_TX_QUEUES(trans,
 			"Can continue DELBA flow ssn = next_recl ="
 			" %d", tid_data->next_reclaimed);
-		iwl_trans_pcie_txq_agg_disable(trans, txq_id);
+		iwl_trans_pcie_tx_agg_disable(trans, sta_id, tid);
 		tid_data->agg.state = IWL_AGG_OFF;
 		iwl_stop_tx_ba_trans_ready(priv(trans),
 					   NUM_IWL_RXON_CTX,
@@ -184,8 +184,7 @@ struct iwl_trans_ops {
 		       struct sk_buff_head *skbs);
 
 	int (*tx_agg_disable)(struct iwl_trans *trans,
-			      enum iwl_rxon_context_id ctx, int sta_id,
-			      int tid);
+			      int sta_id, int tid);
 	int (*tx_agg_alloc)(struct iwl_trans *trans,
 			    enum iwl_rxon_context_id ctx, int sta_id, int tid,
 			    u16 *ssn);
@@ -318,10 +317,9 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
 }
 
 static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
-					   enum iwl_rxon_context_id ctx,
 					   int sta_id, int tid)
 {
-	return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid);
+	return trans->ops->tx_agg_disable(trans, sta_id, tid);
 }
 
 static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
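The last two hunks narrow the transport API itself: tx_agg_disable loses its enum iwl_rxon_context_id argument because notifying mac80211 (the only user of the context) now happens in the upper layer. For readers new to the driver, the static inline wrapper plus function-pointer table is the standard iwl_trans indirection; below is a stripped-down, compilable sketch of that pattern using hypothetical my_* names, not the driver's real types:

	#include <stdio.h>

	/* Hypothetical, simplified version of the iwl_trans ops-vtable
	 * pattern: callers go through a static inline wrapper, and the
	 * transport backend fills in the function pointer. */
	struct my_trans;

	struct my_trans_ops {
		int (*tx_agg_disable)(struct my_trans *trans,
				      int sta_id, int tid);
	};

	struct my_trans {
		const struct my_trans_ops *ops;
	};

	static inline int my_trans_tx_agg_disable(struct my_trans *trans,
						  int sta_id, int tid)
	{
		return trans->ops->tx_agg_disable(trans, sta_id, tid);
	}

	/* a PCIe-like backend implementation */
	static int pcie_tx_agg_disable(struct my_trans *trans,
				       int sta_id, int tid)
	{
		printf("disabling AGG queue for sta %d tid %d\n",
		       sta_id, tid);
		return 0;
	}

	static const struct my_trans_ops pcie_ops = {
		.tx_agg_disable = pcie_tx_agg_disable,
	};

	int main(void)
	{
		struct my_trans trans = { .ops = &pcie_ops };

		return my_trans_tx_agg_disable(&trans, 1, 0);
	}

With this layering, dropping ctx only touches the ops signature, the inline wrapper, and the one PCIe implementation, which is exactly the shape of the two hunks above.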