iwlwifi: tid_data logic move to upper layer - tx AGG setup
The tid_data is not related to the transport layer, so move the logic that depends on it to the upper layer. This patch deals with tx AGG setup. Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com> Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
This commit is contained in:
parent
3c69b59542
commit
822e8b2a2d
6 changed files with 74 additions and 64 deletions
|
@ -527,6 +527,67 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * iwlagn_tx_agg_oper() - finish Tx aggregation setup once mac80211 reports
 * the session operational (IEEE80211_AMPDU_TX_OPERATIONAL).
 *
 * The tid_data now lives in the upper layer, so this function snapshots the
 * session's start sequence number under sta_lock and hands it to the
 * transport, instead of having the transport read tid_data itself.  It then
 * clamps the peer's reorder buffer size against the uCode's per-station
 * limit, optionally switches to RTS/CTS protection for HT traffic, and sends
 * the updated link quality command.
 *
 * Returns the result of iwl_send_lq_cmd() (0 on success, negative on error).
 */
int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	unsigned long flags;
	u16 ssn;

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	/* tid_data is shared state owned by the upper layer: take sta_lock
	 * just long enough to read the start sequence number. */
	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	ssn = priv->shrd->tid_data[sta_priv->sta_id][tid].agg.ssn;
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid,
			       buf_size, ssn);

	/*
	 * If the limit is 0, then it wasn't initialised yet,
	 * use the default. We can do that since we take the
	 * minimum below, and we don't want to go above our
	 * default due to hardware restrictions.
	 */
	if (sta_priv->max_agg_bufsize == 0)
		sta_priv->max_agg_bufsize =
			LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	sta_priv->max_agg_bufsize =
			min(sta_priv->max_agg_bufsize, buf_size);

	if (cfg(priv)->ht_params &&
	    cfg(priv)->ht_params->use_rts_for_aggregation) {
		/*
		 * switch to RTS/CTS if it is the prefer protection
		 * method for HT traffic
		 */
		sta_priv->lq_sta.lq.general_params.flags |=
			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
	}
	priv->agg_tids_count++;
	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
		     priv->agg_tids_count);

	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
			sta_priv->max_agg_bufsize;

	IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
		 sta->addr, tid);

	return iwl_send_lq_cmd(priv, ctx,
			&sta_priv->lq_sta.lq, CMD_ASYNC, false);
}
|
||||||
|
|
||||||
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
|
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
|
||||||
struct iwl_rxon_context *ctx,
|
struct iwl_rxon_context *ctx,
|
||||||
const u8 *addr1)
|
const u8 *addr1)
|
||||||
|
|
|
@ -137,6 +137,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv);
|
||||||
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
|
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
|
||||||
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
||||||
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
|
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
|
||||||
|
int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
||||||
|
struct ieee80211_sta *sta, u16 tid, u8 buf_size);
|
||||||
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
||||||
struct ieee80211_sta *sta, u16 tid);
|
struct ieee80211_sta *sta, u16 tid);
|
||||||
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
|
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
|
||||||
|
|
|
@ -611,7 +611,6 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
|
||||||
struct iwl_priv *priv = hw->priv;
|
struct iwl_priv *priv = hw->priv;
|
||||||
int ret = -EINVAL;
|
int ret = -EINVAL;
|
||||||
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
|
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
|
||||||
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
|
|
||||||
|
|
||||||
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
|
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
|
||||||
sta->addr, tid);
|
sta->addr, tid);
|
||||||
|
@ -659,54 +658,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case IEEE80211_AMPDU_TX_OPERATIONAL:
|
case IEEE80211_AMPDU_TX_OPERATIONAL:
|
||||||
buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
|
ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size);
|
||||||
|
|
||||||
iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, iwl_sta_id(sta),
|
|
||||||
tid, buf_size);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If the limit is 0, then it wasn't initialised yet,
|
|
||||||
* use the default. We can do that since we take the
|
|
||||||
* minimum below, and we don't want to go above our
|
|
||||||
* default due to hardware restrictions.
|
|
||||||
*/
|
|
||||||
if (sta_priv->max_agg_bufsize == 0)
|
|
||||||
sta_priv->max_agg_bufsize =
|
|
||||||
LINK_QUAL_AGG_FRAME_LIMIT_DEF;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Even though in theory the peer could have different
|
|
||||||
* aggregation reorder buffer sizes for different sessions,
|
|
||||||
* our ucode doesn't allow for that and has a global limit
|
|
||||||
* for each station. Therefore, use the minimum of all the
|
|
||||||
* aggregation sessions and our default value.
|
|
||||||
*/
|
|
||||||
sta_priv->max_agg_bufsize =
|
|
||||||
min(sta_priv->max_agg_bufsize, buf_size);
|
|
||||||
|
|
||||||
if (cfg(priv)->ht_params &&
|
|
||||||
cfg(priv)->ht_params->use_rts_for_aggregation) {
|
|
||||||
/*
|
|
||||||
* switch to RTS/CTS if it is the prefer protection
|
|
||||||
* method for HT traffic
|
|
||||||
*/
|
|
||||||
|
|
||||||
sta_priv->lq_sta.lq.general_params.flags |=
|
|
||||||
LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
|
|
||||||
}
|
|
||||||
priv->agg_tids_count++;
|
|
||||||
IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
|
|
||||||
priv->agg_tids_count);
|
|
||||||
|
|
||||||
sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
|
|
||||||
sta_priv->max_agg_bufsize;
|
|
||||||
|
|
||||||
iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
|
|
||||||
&sta_priv->lq_sta.lq, CMD_ASYNC, false);
|
|
||||||
|
|
||||||
IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
|
|
||||||
sta->addr, tid);
|
|
||||||
ret = 0;
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
mutex_unlock(&priv->shrd->mutex);
|
mutex_unlock(&priv->shrd->mutex);
|
||||||
|
|
|
@ -289,7 +289,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
|
||||||
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
|
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
|
||||||
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
||||||
enum iwl_rxon_context_id ctx,
|
enum iwl_rxon_context_id ctx,
|
||||||
int sta_id, int tid, int frame_limit);
|
int sta_id, int tid, int frame_limit, u16 ssn);
|
||||||
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
|
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
|
||||||
int index, enum dma_data_direction dma_dir);
|
int index, enum dma_data_direction dma_dir);
|
||||||
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
|
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
|
||||||
|
|
|
@ -448,12 +448,11 @@ static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
|
||||||
|
|
||||||
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
||||||
enum iwl_rxon_context_id ctx, int sta_id,
|
enum iwl_rxon_context_id ctx, int sta_id,
|
||||||
int tid, int frame_limit)
|
int tid, int frame_limit, u16 ssn)
|
||||||
{
|
{
|
||||||
int tx_fifo, txq_id, ssn_idx;
|
int tx_fifo, txq_id;
|
||||||
u16 ra_tid;
|
u16 ra_tid;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
struct iwl_tid_data *tid_data;
|
|
||||||
|
|
||||||
struct iwl_trans_pcie *trans_pcie =
|
struct iwl_trans_pcie *trans_pcie =
|
||||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||||
|
@ -469,11 +468,7 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_lock_irqsave(&trans->shrd->sta_lock, flags);
|
txq_id = trans->shrd->tid_data[sta_id][tid].agg.txq_id;
|
||||||
tid_data = &trans->shrd->tid_data[sta_id][tid];
|
|
||||||
ssn_idx = SEQ_TO_SN(tid_data->seq_number);
|
|
||||||
txq_id = tid_data->agg.txq_id;
|
|
||||||
spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
|
|
||||||
|
|
||||||
ra_tid = BUILD_RAxTID(sta_id, tid);
|
ra_tid = BUILD_RAxTID(sta_id, tid);
|
||||||
|
|
||||||
|
@ -493,9 +488,9 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
||||||
|
|
||||||
/* Place first TFD at index corresponding to start sequence number.
|
/* Place first TFD at index corresponding to start sequence number.
|
||||||
* Assumes that ssn_idx is valid (!= 0xFFF) */
|
* Assumes that ssn_idx is valid (!= 0xFFF) */
|
||||||
trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
|
trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
|
||||||
trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
|
trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
|
||||||
iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
|
iwl_trans_set_wr_ptrs(trans, txq_id, ssn);
|
||||||
|
|
||||||
/* Set up Tx window size and frame limit for this queue */
|
/* Set up Tx window size and frame limit for this queue */
|
||||||
iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
|
iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
|
||||||
|
|
|
@ -189,7 +189,7 @@ struct iwl_trans_ops {
|
||||||
int sta_id, int tid);
|
int sta_id, int tid);
|
||||||
void (*tx_agg_setup)(struct iwl_trans *trans,
|
void (*tx_agg_setup)(struct iwl_trans *trans,
|
||||||
enum iwl_rxon_context_id ctx, int sta_id, int tid,
|
enum iwl_rxon_context_id ctx, int sta_id, int tid,
|
||||||
int frame_limit);
|
int frame_limit, u16 ssn);
|
||||||
|
|
||||||
void (*kick_nic)(struct iwl_trans *trans);
|
void (*kick_nic)(struct iwl_trans *trans);
|
||||||
|
|
||||||
|
@ -331,9 +331,9 @@ static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
|
||||||
/**
 * iwl_trans_tx_agg_setup - program the transport for a Tx aggregation session
 * @trans: the transport layer
 * @ctx: RXON context id the station belongs to
 * @sta_id: station index
 * @tid: traffic identifier of the aggregation session
 * @frame_limit: maximum number of frames per aggregate
 * @ssn: start sequence number of the session, supplied by the upper layer
 *	(the transport no longer reads it from tid_data itself)
 *
 * Thin inline dispatcher to the transport implementation's tx_agg_setup op.
 */
static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
					  enum iwl_rxon_context_id ctx,
					  int sta_id, int tid,
					  int frame_limit, u16 ssn)
{
	trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
}
|
||||||
|
|
||||||
static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
|
static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
|
||||||
|
|
Loading…
Reference in a new issue