Merge tag 'wireless-drivers-next-for-davem-2015-10-09' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
Major changes:

iwlwifi

* some debugfs improvements
* fix signedness in beacon statistics
* deinline some functions to reduce size when device tracing is enabled
* filter beacons out in AP mode when no stations are associated
* deprecate firmware version -12
* fix a runtime PM vs. legacy suspend race
* one-liner fix for a ToF bug
* clean-ups in the rx code
* small debugging improvement
* fix WoWLAN with new firmware versions
* more clean-ups towards multiple RX queues
* some rate scaling fixes and improvements
* some time-of-flight fixes
* other generic improvements and clean-ups

brcmfmac

* rework code dealing with multiple interfaces
* allow logging the firmware console output at debug level
* support for BCM4350, BCM4365, and BCM4366 PCIe devices
* fixes for legacy P2P and P2P device handling
* correctly set and get tx-power

ath9k

* add support for Outside Context of a BSS (OCB) mode

mwifiex

* add USB multichannel feature
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2015-10-12 19:39:18 -07:00
commit 9916596742
117 changed files with 2675 additions and 1408 deletions


@ -5546,7 +5546,7 @@ F: drivers/net/wireless/iwlegacy/
INTEL WIRELESS WIFI LINK (iwlwifi) INTEL WIRELESS WIFI LINK (iwlwifi)
M: Johannes Berg <johannes.berg@intel.com> M: Johannes Berg <johannes.berg@intel.com>
M: Emmanuel Grumbach <emmanuel.grumbach@intel.com> M: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
M: Intel Linux Wireless <ilw@linux.intel.com> M: Intel Linux Wireless <linuxwifi@intel.com>
L: linux-wireless@vger.kernel.org L: linux-wireless@vger.kernel.org
W: http://intellinuxwireless.org W: http://intellinuxwireless.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git


@ -436,13 +436,8 @@ int bcma_bus_register(struct bcma_bus *bus)
} }
dev = bcma_bus_get_host_dev(bus); dev = bcma_bus_get_host_dev(bus);
/* TODO: remove check for IS_BUILTIN(CONFIG_BCMA) check when if (dev) {
* of_default_bus_match_table is exported or in some other way of_platform_default_populate(dev->of_node, NULL, dev);
* accessible. This is just a temporary workaround.
*/
if (IS_BUILTIN(CONFIG_BCMA) && dev) {
of_platform_populate(dev->of_node, of_default_bus_match_table,
NULL, dev);
} }
/* Cores providing flash access go before SPROM init */ /* Cores providing flash access go before SPROM init */


@ -1237,6 +1237,7 @@ struct airo_info {
int wep_capable; int wep_capable;
int max_wep_idx; int max_wep_idx;
int last_auth;
/* WPA-related stuff */ /* WPA-related stuff */
unsigned int bssListFirst; unsigned int bssListFirst;
@ -3266,6 +3267,7 @@ static void airo_handle_link(struct airo_info *ai)
wake_up_interruptible(&ai->thr_wait); wake_up_interruptible(&ai->thr_wait);
} else } else
airo_send_event(ai->dev); airo_send_event(ai->dev);
netif_carrier_on(ai->dev);
} else if (!scan_forceloss) { } else if (!scan_forceloss) {
if (auto_wep && !ai->expires) { if (auto_wep && !ai->expires) {
ai->expires = RUN_AT(3*HZ); ai->expires = RUN_AT(3*HZ);
@ -3276,6 +3278,9 @@ static void airo_handle_link(struct airo_info *ai)
eth_zero_addr(wrqu.ap_addr.sa_data); eth_zero_addr(wrqu.ap_addr.sa_data);
wrqu.ap_addr.sa_family = ARPHRD_ETHER; wrqu.ap_addr.sa_family = ARPHRD_ETHER;
wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL); wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL);
netif_carrier_off(ai->dev);
} else {
netif_carrier_off(ai->dev);
} }
} }
@ -3612,6 +3617,7 @@ static void disable_MAC( struct airo_info *ai, int lock ) {
return; return;
if (test_bit(FLAG_ENABLED, &ai->flags)) { if (test_bit(FLAG_ENABLED, &ai->flags)) {
netif_carrier_off(ai->dev);
memset(&cmd, 0, sizeof(cmd)); memset(&cmd, 0, sizeof(cmd));
cmd.cmd = MAC_DISABLE; // disable in case already enabled cmd.cmd = MAC_DISABLE; // disable in case already enabled
issuecommand(ai, &cmd, &rsp); issuecommand(ai, &cmd, &rsp);
@ -3786,6 +3792,16 @@ static void mpi_receive_802_11(struct airo_info *ai)
} }
} }
static inline void set_auth_type(struct airo_info *local, int auth_type)
{
local->config.authType = auth_type;
/* Cache the last auth type used (of AUTH_OPEN and AUTH_ENCRYPT).
* Used by airo_set_auth()
*/
if (auth_type == AUTH_OPEN || auth_type == AUTH_ENCRYPT)
local->last_auth = auth_type;
}
static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
{ {
Cmd cmd; Cmd cmd;
@ -3862,7 +3878,7 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
"level scale"); "level scale");
} }
ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS; ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
ai->config.authType = AUTH_OPEN; set_auth_type(ai, AUTH_OPEN);
ai->config.modulation = MOD_CCK; ai->config.modulation = MOD_CCK;
if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) && if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
@ -4880,13 +4896,13 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
line += 5; line += 5;
switch( line[0] ) { switch( line[0] ) {
case 's': case 's':
ai->config.authType = AUTH_SHAREDKEY; set_auth_type(ai, AUTH_SHAREDKEY);
break; break;
case 'e': case 'e':
ai->config.authType = AUTH_ENCRYPT; set_auth_type(ai, AUTH_ENCRYPT);
break; break;
default: default:
ai->config.authType = AUTH_OPEN; set_auth_type(ai, AUTH_OPEN);
break; break;
} }
set_bit (FLAG_COMMIT, &ai->flags); set_bit (FLAG_COMMIT, &ai->flags);
@ -6368,9 +6384,8 @@ static int airo_set_encode(struct net_device *dev,
* should be enabled (user may turn it off later) * should be enabled (user may turn it off later)
* This is also how "iwconfig ethX key on" works */ * This is also how "iwconfig ethX key on" works */
if((index == current_index) && (key.len > 0) && if((index == current_index) && (key.len > 0) &&
(local->config.authType == AUTH_OPEN)) { (local->config.authType == AUTH_OPEN))
local->config.authType = AUTH_ENCRYPT; set_auth_type(local, AUTH_ENCRYPT);
}
} else { } else {
/* Do we want to just set the transmit key index ? */ /* Do we want to just set the transmit key index ? */
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
@ -6389,12 +6404,12 @@ static int airo_set_encode(struct net_device *dev,
} }
} }
/* Read the flags */ /* Read the flags */
if(dwrq->flags & IW_ENCODE_DISABLED) if (dwrq->flags & IW_ENCODE_DISABLED)
local->config.authType = AUTH_OPEN; // disable encryption set_auth_type(local, AUTH_OPEN); /* disable encryption */
if(dwrq->flags & IW_ENCODE_RESTRICTED) if(dwrq->flags & IW_ENCODE_RESTRICTED)
local->config.authType = AUTH_SHAREDKEY; // Only Both set_auth_type(local, AUTH_SHAREDKEY); /* Only Both */
if(dwrq->flags & IW_ENCODE_OPEN) if (dwrq->flags & IW_ENCODE_OPEN)
local->config.authType = AUTH_ENCRYPT; // Only Wep set_auth_type(local, AUTH_ENCRYPT); /* Only Wep */
/* Commit the changes to flags if needed */ /* Commit the changes to flags if needed */
if (local->config.authType != currentAuthType) if (local->config.authType != currentAuthType)
set_bit (FLAG_COMMIT, &local->flags); set_bit (FLAG_COMMIT, &local->flags);
@ -6549,12 +6564,12 @@ static int airo_set_encodeext(struct net_device *dev,
} }
/* Read the flags */ /* Read the flags */
if(encoding->flags & IW_ENCODE_DISABLED) if (encoding->flags & IW_ENCODE_DISABLED)
local->config.authType = AUTH_OPEN; // disable encryption set_auth_type(local, AUTH_OPEN); /* disable encryption */
if(encoding->flags & IW_ENCODE_RESTRICTED) if(encoding->flags & IW_ENCODE_RESTRICTED)
local->config.authType = AUTH_SHAREDKEY; // Only Both set_auth_type(local, AUTH_SHAREDKEY); /* Only Both */
if(encoding->flags & IW_ENCODE_OPEN) if (encoding->flags & IW_ENCODE_OPEN)
local->config.authType = AUTH_ENCRYPT; // Only Wep set_auth_type(local, AUTH_ENCRYPT);
/* Commit the changes to flags if needed */ /* Commit the changes to flags if needed */
if (local->config.authType != currentAuthType) if (local->config.authType != currentAuthType)
set_bit (FLAG_COMMIT, &local->flags); set_bit (FLAG_COMMIT, &local->flags);
@ -6659,9 +6674,9 @@ static int airo_set_auth(struct net_device *dev,
if (param->value) { if (param->value) {
/* Only change auth type if unencrypted */ /* Only change auth type if unencrypted */
if (currentAuthType == AUTH_OPEN) if (currentAuthType == AUTH_OPEN)
local->config.authType = AUTH_ENCRYPT; set_auth_type(local, AUTH_ENCRYPT);
} else { } else {
local->config.authType = AUTH_OPEN; set_auth_type(local, AUTH_OPEN);
} }
/* Commit the changes to flags if needed */ /* Commit the changes to flags if needed */
@ -6670,13 +6685,14 @@ static int airo_set_auth(struct net_device *dev,
break; break;
case IW_AUTH_80211_AUTH_ALG: { case IW_AUTH_80211_AUTH_ALG: {
/* FIXME: What about AUTH_OPEN? This API seems to
* disallow setting our auth to AUTH_OPEN.
*/
if (param->value & IW_AUTH_ALG_SHARED_KEY) { if (param->value & IW_AUTH_ALG_SHARED_KEY) {
local->config.authType = AUTH_SHAREDKEY; set_auth_type(local, AUTH_SHAREDKEY);
} else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) { } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
local->config.authType = AUTH_ENCRYPT; /* We don't know here if WEP open system or
* unencrypted mode was requested - so use the
* last mode (of these two) used last time
*/
set_auth_type(local, local->last_auth);
} else } else
return -EINVAL; return -EINVAL;


@ -34,16 +34,19 @@ unsigned int ath10k_debug_mask;
static unsigned int ath10k_cryptmode_param; static unsigned int ath10k_cryptmode_param;
static bool uart_print; static bool uart_print;
static bool skip_otp; static bool skip_otp;
static bool rawmode;
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644); module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
module_param(uart_print, bool, 0644); module_param(uart_print, bool, 0644);
module_param(skip_otp, bool, 0644); module_param(skip_otp, bool, 0644);
module_param(rawmode, bool, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask"); MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging"); MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode"); MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software"); MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
static const struct ath10k_hw_params ath10k_hw_params_list[] = { static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{ {
@ -54,6 +57,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.has_shifted_cc_wraparound = true, .has_shifted_cc_wraparound = true,
.otp_exe_param = 0, .otp_exe_param = 0,
.channel_counters_freq_hz = 88000, .channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.fw = { .fw = {
.dir = QCA988X_HW_2_0_FW_DIR, .dir = QCA988X_HW_2_0_FW_DIR,
.fw = QCA988X_HW_2_0_FW_FILE, .fw = QCA988X_HW_2_0_FW_FILE,
@ -70,6 +74,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.uart_pin = 6, .uart_pin = 6,
.otp_exe_param = 0, .otp_exe_param = 0,
.channel_counters_freq_hz = 88000, .channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.fw = { .fw = {
.dir = QCA6174_HW_2_1_FW_DIR, .dir = QCA6174_HW_2_1_FW_DIR,
.fw = QCA6174_HW_2_1_FW_FILE, .fw = QCA6174_HW_2_1_FW_FILE,
@ -86,6 +91,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.uart_pin = 6, .uart_pin = 6,
.otp_exe_param = 0, .otp_exe_param = 0,
.channel_counters_freq_hz = 88000, .channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.fw = { .fw = {
.dir = QCA6174_HW_3_0_FW_DIR, .dir = QCA6174_HW_3_0_FW_DIR,
.fw = QCA6174_HW_3_0_FW_FILE, .fw = QCA6174_HW_3_0_FW_FILE,
@ -102,6 +108,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.uart_pin = 6, .uart_pin = 6,
.otp_exe_param = 0, .otp_exe_param = 0,
.channel_counters_freq_hz = 88000, .channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.fw = { .fw = {
/* uses same binaries as hw3.0 */ /* uses same binaries as hw3.0 */
.dir = QCA6174_HW_3_0_FW_DIR, .dir = QCA6174_HW_3_0_FW_DIR,
@ -120,6 +127,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0x00000700, .otp_exe_param = 0x00000700,
.continuous_frag_desc = true, .continuous_frag_desc = true,
.channel_counters_freq_hz = 150000, .channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
.fw = { .fw = {
.dir = QCA99X0_HW_2_0_FW_DIR, .dir = QCA99X0_HW_2_0_FW_DIR,
.fw = QCA99X0_HW_2_0_FW_FILE, .fw = QCA99X0_HW_2_0_FW_FILE,
@ -142,12 +150,17 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp", [ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp",
[ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad", [ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
[ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init", [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
[ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode",
}; };
static unsigned int ath10k_core_get_fw_feature_str(char *buf, static unsigned int ath10k_core_get_fw_feature_str(char *buf,
size_t buf_len, size_t buf_len,
enum ath10k_fw_features feat) enum ath10k_fw_features feat)
{ {
/* make sure that ath10k_core_fw_feature_str[] gets updated */
BUILD_BUG_ON(ARRAY_SIZE(ath10k_core_fw_feature_str) !=
ATH10K_FW_FEATURE_COUNT);
if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) || if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) ||
WARN_ON(!ath10k_core_fw_feature_str[feat])) { WARN_ON(!ath10k_core_fw_feature_str[feat])) {
return scnprintf(buf, buf_len, "bit%d", feat); return scnprintf(buf, buf_len, "bit%d", feat);
@ -1117,6 +1130,15 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT; ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT; ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
if (rawmode) {
if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
ar->fw_features)) {
ath10k_err(ar, "rawmode = 1 requires support from firmware");
return -EINVAL;
}
set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
}
if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW; ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
@ -1714,6 +1736,7 @@ void ath10k_core_destroy(struct ath10k *ar)
destroy_workqueue(ar->workqueue_aux); destroy_workqueue(ar->workqueue_aux);
ath10k_debug_destroy(ar); ath10k_debug_destroy(ar);
ath10k_wmi_free_host_mem(ar);
ath10k_mac_destroy(ar); ath10k_mac_destroy(ar);
} }
EXPORT_SYMBOL(ath10k_core_destroy); EXPORT_SYMBOL(ath10k_core_destroy);


@ -612,6 +612,11 @@ struct ath10k {
u32 channel_counters_freq_hz; u32 channel_counters_freq_hz;
/* Mgmt tx descriptors threshold for limiting probe response
* frames.
*/
u32 max_probe_resp_desc_thres;
struct ath10k_hw_params_fw { struct ath10k_hw_params_fw {
const char *dir; const char *dir;
const char *fw; const char *fw;


@ -1485,6 +1485,7 @@ struct ath10k_htt {
spinlock_t tx_lock; spinlock_t tx_lock;
int max_num_pending_tx; int max_num_pending_tx;
int num_pending_tx; int num_pending_tx;
int num_pending_mgmt_tx;
struct idr pending_tx; struct idr pending_tx;
wait_queue_head_t empty_tx_wq; wait_queue_head_t empty_tx_wq;
struct dma_pool *tx_pool; struct dma_pool *tx_pool;
@ -1587,7 +1588,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu, u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu); u8 max_subfrms_amsdu);
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb); int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);


@ -643,6 +643,8 @@ struct amsdu_subframe_hdr {
__be16 len; __be16 len;
} __packed; } __packed;
#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
static void ath10k_htt_rx_h_rates(struct ath10k *ar, static void ath10k_htt_rx_h_rates(struct ath10k *ar,
struct ieee80211_rx_status *status, struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd) struct htt_rx_desc *rxd)
@ -650,6 +652,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
struct ieee80211_supported_band *sband; struct ieee80211_supported_band *sband;
u8 cck, rate, bw, sgi, mcs, nss; u8 cck, rate, bw, sgi, mcs, nss;
u8 preamble = 0; u8 preamble = 0;
u8 group_id;
u32 info1, info2, info3; u32 info1, info2, info3;
info1 = __le32_to_cpu(rxd->ppdu_start.info1); info1 = __le32_to_cpu(rxd->ppdu_start.info1);
@ -692,10 +695,50 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
case HTT_RX_VHT_WITH_TXBF: case HTT_RX_VHT_WITH_TXBF:
/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
TODO check this */ TODO check this */
mcs = (info3 >> 4) & 0x0F;
nss = ((info2 >> 10) & 0x07) + 1;
bw = info2 & 3; bw = info2 & 3;
sgi = info3 & 1; sgi = info3 & 1;
group_id = (info2 >> 4) & 0x3F;
if (GROUP_ID_IS_SU_MIMO(group_id)) {
mcs = (info3 >> 4) & 0x0F;
nss = ((info2 >> 10) & 0x07) + 1;
} else {
/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
* so it's impossible to decode MCS. Also since
* firmware consumes Group Id Management frames host
* has no knowledge regarding group/user position
* mapping so it's impossible to pick the correct Nsts
* from VHT-SIG-A1.
*
* Bandwidth and SGI are valid so report the rateinfo
* on best-effort basis.
*/
mcs = 0;
nss = 1;
}
if (mcs > 0x09) {
ath10k_warn(ar, "invalid MCS received %u\n", mcs);
ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
__le32_to_cpu(rxd->attention.flags),
__le32_to_cpu(rxd->mpdu_start.info0),
__le32_to_cpu(rxd->mpdu_start.info1),
__le32_to_cpu(rxd->msdu_start.common.info0),
__le32_to_cpu(rxd->msdu_start.common.info1),
rxd->ppdu_start.info0,
__le32_to_cpu(rxd->ppdu_start.info1),
__le32_to_cpu(rxd->ppdu_start.info2),
__le32_to_cpu(rxd->ppdu_start.info3),
__le32_to_cpu(rxd->ppdu_start.info4));
ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
__le32_to_cpu(rxd->msdu_end.common.info0),
__le32_to_cpu(rxd->mpdu_end.info0));
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
"rx desc msdu payload: ",
rxd->msdu_payload, 50);
}
status->rate_idx = mcs; status->rate_idx = mcs;
status->vht_nss = nss; status->vht_nss = nss;


@ -22,22 +22,28 @@
#include "txrx.h" #include "txrx.h"
#include "debug.h" #include "debug.h"
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
{ {
if (limit_mgmt_desc)
htt->num_pending_mgmt_tx--;
htt->num_pending_tx--; htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1) if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
} }
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
bool limit_mgmt_desc)
{ {
spin_lock_bh(&htt->tx_lock); spin_lock_bh(&htt->tx_lock);
__ath10k_htt_tx_dec_pending(htt); __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
} }
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
bool limit_mgmt_desc, bool is_probe_resp)
{ {
struct ath10k *ar = htt->ar;
int ret = 0; int ret = 0;
spin_lock_bh(&htt->tx_lock); spin_lock_bh(&htt->tx_lock);
@ -47,6 +53,15 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
goto exit; goto exit;
} }
if (limit_mgmt_desc) {
if (is_probe_resp && (htt->num_pending_mgmt_tx >
ar->hw_params.max_probe_resp_desc_thres)) {
ret = -EBUSY;
goto exit;
}
htt->num_pending_mgmt_tx++;
}
htt->num_pending_tx++; htt->num_pending_tx++;
if (htt->num_pending_tx == htt->max_num_pending_tx) if (htt->num_pending_tx == htt->max_num_pending_tx)
ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
@ -417,8 +432,19 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
int len = 0; int len = 0;
int msdu_id = -1; int msdu_id = -1;
int res; int res;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
bool limit_mgmt_desc = false;
bool is_probe_resp = false;
if (ar->hw_params.max_probe_resp_desc_thres) {
limit_mgmt_desc = true;
if (ieee80211_is_probe_resp(hdr->frame_control))
is_probe_resp = true;
}
res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
res = ath10k_htt_tx_inc_pending(htt);
if (res) if (res)
goto err; goto err;
@ -476,7 +502,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
ath10k_htt_tx_free_msdu_id(htt, msdu_id); ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
err_tx_dec: err_tx_dec:
ath10k_htt_tx_dec_pending(htt); ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err: err:
return res; return res;
} }
@ -498,8 +524,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
dma_addr_t paddr = 0; dma_addr_t paddr = 0;
u32 frags_paddr = 0; u32 frags_paddr = 0;
struct htt_msdu_ext_desc *ext_desc = NULL; struct htt_msdu_ext_desc *ext_desc = NULL;
bool limit_mgmt_desc = false;
bool is_probe_resp = false;
res = ath10k_htt_tx_inc_pending(htt); if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
ar->hw_params.max_probe_resp_desc_thres) {
limit_mgmt_desc = true;
if (ieee80211_is_probe_resp(hdr->frame_control))
is_probe_resp = true;
}
res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
if (res) if (res)
goto err; goto err;
@ -528,7 +564,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
ieee80211_has_protected(hdr->frame_control)) { ieee80211_has_protected(hdr->frame_control)) {
skb_put(msdu, IEEE80211_CCMP_MIC_LEN); skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
} else if (!skb_cb->htt.nohwcrypt && } else if (!skb_cb->htt.nohwcrypt &&
skb_cb->txmode == ATH10K_HW_TXRX_RAW) { skb_cb->txmode == ATH10K_HW_TXRX_RAW &&
ieee80211_has_protected(hdr->frame_control)) {
skb_put(msdu, IEEE80211_CCMP_MIC_LEN); skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
} }
@ -678,7 +715,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
ath10k_htt_tx_free_msdu_id(htt, msdu_id); ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
err_tx_dec: err_tx_dec:
ath10k_htt_tx_dec_pending(htt); ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err: err:
return res; return res;
} }


@ -413,16 +413,6 @@ enum ath10k_hw_rate_cck {
/* Number of Copy Engines supported */ /* Number of Copy Engines supported */
#define CE_COUNT ar->hw_values->ce_count #define CE_COUNT ar->hw_values->ce_count
/*
* Total number of PCIe MSI interrupts requested for all interrupt sources.
* PCIe standard forces this to be a power of 2.
* Some Host OS's limit MSI requests that can be granted to 8
* so for now we abide by this limit and avoid requesting more
* than that.
*/
#define MSI_NUM_REQUEST_LOG2 3
#define MSI_NUM_REQUEST (1<<MSI_NUM_REQUEST_LOG2)
/* /*
* Granted MSIs are assigned as follows: * Granted MSIs are assigned as follows:
* Firmware uses the first * Firmware uses the first


@ -1070,6 +1070,7 @@ static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
return false; return false;
return ar->monitor || return ar->monitor ||
ar->filter_flags & FIF_OTHER_BSS ||
test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
} }
@ -3617,9 +3618,6 @@ static int ath10k_start_scan(struct ath10k *ar,
} }
spin_unlock_bh(&ar->data_lock); spin_unlock_bh(&ar->data_lock);
/* Add a 200ms margin to account for event/command processing */
ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
msecs_to_jiffies(arg->max_scan_time+200));
return 0; return 0;
} }
@ -4064,21 +4062,56 @@ static u32 get_nss_from_chainmask(u16 chain_mask)
return 1; return 1;
} }
static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
{
int nsts = ar->vht_cap_info;
nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
/* If firmware does not deliver to host number of space-time
* streams supported, assume it support up to 4 BF STS and return
* the value for VHT CAP: nsts-1)
* */
if (nsts == 0)
return 3;
return nsts;
}
static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
{
int sound_dim = ar->vht_cap_info;
sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
sound_dim >>=IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
/* If the sounding dimension is not advertised by the firmware,
* let's use a default value of 1
*/
if (sound_dim == 0)
return 1;
return sound_dim;
}
static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
{ {
u32 value = 0; u32 value = 0;
struct ath10k *ar = arvif->ar; struct ath10k *ar = arvif->ar;
int nsts;
int sound_dim;
if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
return 0; return 0;
nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
value |= SM((ar->num_rf_chains - 1), WMI_TXBF_STS_CAP_OFFSET); value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
value |= SM((ar->num_rf_chains - 1), WMI_BF_SOUND_DIM_OFFSET); value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
if (!value) if (!value)
return 0; return 0;
@ -4175,6 +4208,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_ADHOC:
arvif->vdev_type = WMI_VDEV_TYPE_IBSS; arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
break; break;
case NL80211_IFTYPE_MESH_POINT:
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
ret = -EINVAL;
ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
goto err;
}
arvif->vdev_type = WMI_VDEV_TYPE_AP;
break;
case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP:
arvif->vdev_type = WMI_VDEV_TYPE_AP; arvif->vdev_type = WMI_VDEV_TYPE_AP;
@ -4215,6 +4256,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
* become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
*/ */
if (vif->type == NL80211_IFTYPE_ADHOC || if (vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_AP) { vif->type == NL80211_IFTYPE_AP) {
arvif->beacon_buf = dma_zalloc_coherent(ar->dev, arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
IEEE80211_MAX_FRAME_LEN, IEEE80211_MAX_FRAME_LEN,
@ -4554,6 +4596,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (ret) if (ret)
ath10k_warn(ar, "failed to update beacon template: %d\n", ath10k_warn(ar, "failed to update beacon template: %d\n",
ret); ret);
if (ieee80211_vif_is_mesh(vif)) {
/* mesh doesn't use SSID but firmware needs it */
strncpy(arvif->u.ap.ssid, "mesh",
sizeof(arvif->u.ap.ssid));
arvif->u.ap.ssid_len = 4;
}
} }
if (changed & BSS_CHANGED_AP_PROBE_RESP) { if (changed & BSS_CHANGED_AP_PROBE_RESP) {
@ -4751,6 +4800,11 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock); spin_unlock_bh(&ar->data_lock);
} }
/* Add a 200ms margin to account for event/command processing */
ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
msecs_to_jiffies(arg.max_scan_time +
200));
exit: exit:
mutex_unlock(&ar->conf_mutex); mutex_unlock(&ar->conf_mutex);
return ret; return ret;
@ -5293,6 +5347,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_AUTH && } else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP || (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) { vif->type == NL80211_IFTYPE_ADHOC)) {
/* /*
* New association. * New association.
@ -5328,6 +5383,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC && } else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP || (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) { vif->type == NL80211_IFTYPE_ADHOC)) {
/* /*
* Disassociation. * Disassociation.
@ -5901,7 +5957,7 @@ ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
} }
static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
u8 rate, u8 nss, u8 sgi) u8 rate, u8 nss, u8 sgi, u8 ldpc)
{ {
struct ath10k *ar = arvif->ar; struct ath10k *ar = arvif->ar;
u32 vdev_param; u32 vdev_param;
@ -5934,6 +5990,13 @@ static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
return ret; return ret;
} }
vdev_param = ar->wmi.vdev_param->ldpc;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
if (ret) {
ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
return ret;
}
return 0; return 0;
} }
@ -5997,6 +6060,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
u8 rate; u8 rate;
u8 nss; u8 nss;
u8 sgi; u8 sgi;
u8 ldpc;
int single_nss; int single_nss;
int ret; int ret;
@ -6006,6 +6070,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
band = def.chan->band; band = def.chan->band;
ht_mcs_mask = mask->control[band].ht_mcs; ht_mcs_mask = mask->control[band].ht_mcs;
vht_mcs_mask = mask->control[band].vht_mcs; vht_mcs_mask = mask->control[band].vht_mcs;
ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
sgi = mask->control[band].gi; sgi = mask->control[band].gi;
if (sgi == NL80211_TXRATE_FORCE_LGI) if (sgi == NL80211_TXRATE_FORCE_LGI)
@ -6044,7 +6109,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex); mutex_lock(&ar->conf_mutex);
ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi); ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
if (ret) { if (ret) {
ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n", ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
arvif->vdev_id, ret); arvif->vdev_id, ret);
@ -6218,6 +6283,94 @@ ath10k_mac_update_rx_channel(struct ath10k *ar,
rcu_read_unlock(); rcu_read_unlock();
} }
static void
ath10k_mac_update_vif_chan(struct ath10k *ar,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs)
{
struct ath10k_vif *arvif;
int ret;
int i;
lockdep_assert_held(&ar->conf_mutex);
/* First stop monitor interface. Some FW versions crash if there's a
* lone monitor interface.
*/
if (ar->monitor_started)
ath10k_monitor_stop(ar);
for (i = 0; i < n_vifs; i++) {
arvif = ath10k_vif_to_arvif(vifs[i].vif);
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
arvif->vdev_id,
vifs[i].old_ctx->def.chan->center_freq,
vifs[i].new_ctx->def.chan->center_freq,
vifs[i].old_ctx->def.width,
vifs[i].new_ctx->def.width);
if (WARN_ON(!arvif->is_started))
continue;
if (WARN_ON(!arvif->is_up))
continue;
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret) {
ath10k_warn(ar, "failed to down vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
}
/* All relevant vdevs are downed and associated channel resources
* should be available for the channel switch now.
*/
spin_lock_bh(&ar->data_lock);
ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
spin_unlock_bh(&ar->data_lock);
for (i = 0; i < n_vifs; i++) {
arvif = ath10k_vif_to_arvif(vifs[i].vif);
if (WARN_ON(!arvif->is_started))
continue;
if (WARN_ON(!arvif->is_up))
continue;
ret = ath10k_mac_setup_bcn_tmpl(arvif);
if (ret)
ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
ret);
ret = ath10k_mac_setup_prb_tmpl(arvif);
if (ret)
ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
ret);
ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
if (ret) {
ath10k_warn(ar, "failed to restart vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
arvif->vdev_id, ret);
continue;
}
}
ath10k_monitor_recalc(ar);
}
static int static int
ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx) struct ieee80211_chanctx_conf *ctx)
@ -6264,12 +6417,52 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex); mutex_unlock(&ar->conf_mutex);
} }
struct ath10k_mac_change_chanctx_arg {
struct ieee80211_chanctx_conf *ctx;
struct ieee80211_vif_chanctx_switch *vifs;
int n_vifs;
int next_vif;
};
static void
ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath10k_mac_change_chanctx_arg *arg = data;
if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
return;
arg->n_vifs++;
}
static void
ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath10k_mac_change_chanctx_arg *arg = data;
struct ieee80211_chanctx_conf *ctx;
ctx = rcu_access_pointer(vif->chanctx_conf);
if (ctx != arg->ctx)
return;
if (WARN_ON(arg->next_vif == arg->n_vifs))
return;
arg->vifs[arg->next_vif].vif = vif;
arg->vifs[arg->next_vif].old_ctx = ctx;
arg->vifs[arg->next_vif].new_ctx = ctx;
arg->next_vif++;
}
static void static void
ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx, struct ieee80211_chanctx_conf *ctx,
u32 changed) u32 changed)
{ {
struct ath10k *ar = hw->priv; struct ath10k *ar = hw->priv;
struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
mutex_lock(&ar->conf_mutex); mutex_lock(&ar->conf_mutex);
@ -6283,6 +6476,30 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
goto unlock; goto unlock;
if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
ieee80211_iterate_active_interfaces_atomic(
hw,
IEEE80211_IFACE_ITER_NORMAL,
ath10k_mac_change_chanctx_cnt_iter,
&arg);
if (arg.n_vifs == 0)
goto radar;
arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
GFP_KERNEL);
if (!arg.vifs)
goto radar;
ieee80211_iterate_active_interfaces_atomic(
hw,
IEEE80211_IFACE_ITER_NORMAL,
ath10k_mac_change_chanctx_fill_iter,
&arg);
ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
kfree(arg.vifs);
}
radar:
ath10k_recalc_radar_detection(ar); ath10k_recalc_radar_detection(ar);
/* FIXME: How to configure Rx chains properly? */ /* FIXME: How to configure Rx chains properly? */
@ -6402,91 +6619,13 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
enum ieee80211_chanctx_switch_mode mode) enum ieee80211_chanctx_switch_mode mode)
{ {
struct ath10k *ar = hw->priv; struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif;
int ret;
int i;
mutex_lock(&ar->conf_mutex); mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC, ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx switch n_vifs %d mode %d\n", "mac chanctx switch n_vifs %d mode %d\n",
n_vifs, mode); n_vifs, mode);
ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
/* First stop monitor interface. Some FW versions crash if there's a
* lone monitor interface.
*/
if (ar->monitor_started)
ath10k_monitor_stop(ar);
for (i = 0; i < n_vifs; i++) {
arvif = ath10k_vif_to_arvif(vifs[i].vif);
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
arvif->vdev_id,
vifs[i].old_ctx->def.chan->center_freq,
vifs[i].new_ctx->def.chan->center_freq,
vifs[i].old_ctx->def.width,
vifs[i].new_ctx->def.width);
if (WARN_ON(!arvif->is_started))
continue;
if (WARN_ON(!arvif->is_up))
continue;
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret) {
ath10k_warn(ar, "failed to down vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
}
/* All relevant vdevs are downed and associated channel resources
* should be available for the channel switch now.
*/
spin_lock_bh(&ar->data_lock);
ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
spin_unlock_bh(&ar->data_lock);
for (i = 0; i < n_vifs; i++) {
arvif = ath10k_vif_to_arvif(vifs[i].vif);
if (WARN_ON(!arvif->is_started))
continue;
if (WARN_ON(!arvif->is_up))
continue;
ret = ath10k_mac_setup_bcn_tmpl(arvif);
if (ret)
ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
ret);
ret = ath10k_mac_setup_prb_tmpl(arvif);
if (ret)
ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
ret);
ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
if (ret) {
ath10k_warn(ar, "failed to restart vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
arvif->vdev_id, ret);
continue;
}
}
ath10k_monitor_recalc(ar);
mutex_unlock(&ar->conf_mutex); mutex_unlock(&ar->conf_mutex);
return 0; return 0;
@ -6642,6 +6781,9 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
{ {
.max = 7, .max = 7,
.types = BIT(NL80211_IFTYPE_AP) .types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
}, },
}; };
@ -6649,6 +6791,9 @@ static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
{ {
.max = 8, .max = 8,
.types = BIT(NL80211_IFTYPE_AP) .types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
}, },
}; };
@ -6686,6 +6831,9 @@ static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
{ {
.max = 2, .max = 2,
.types = BIT(NL80211_IFTYPE_AP) | .types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO), BIT(NL80211_IFTYPE_P2P_GO),
}, },
@ -6707,6 +6855,9 @@ static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
{ {
.max = 1, .max = 1,
.types = BIT(NL80211_IFTYPE_AP) | .types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_P2P_GO), BIT(NL80211_IFTYPE_P2P_GO),
}, },
{ {
@ -6773,6 +6924,9 @@ static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
{ {
.max = 16, .max = 16,
.types = BIT(NL80211_IFTYPE_AP) .types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
}, },
}; };
@ -6804,7 +6958,7 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
val = ar->num_rf_chains - 1; val = ath10k_mac_get_vht_cap_bf_sts(ar);
val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
@ -6813,7 +6967,7 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
val = ar->num_rf_chains - 1; val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
@ -6997,7 +7151,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->interface_modes = ar->hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP); BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT);
ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask; ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask; ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;


@ -2609,12 +2609,9 @@ static int ath10k_pci_request_irq(struct ath10k *ar)
return ath10k_pci_request_irq_legacy(ar); return ath10k_pci_request_irq_legacy(ar);
case 1: case 1:
return ath10k_pci_request_irq_msi(ar); return ath10k_pci_request_irq_msi(ar);
case MSI_NUM_REQUEST: default:
return ath10k_pci_request_irq_msix(ar); return ath10k_pci_request_irq_msix(ar);
} }
ath10k_warn(ar, "unknown irq configuration upon request\n");
return -EINVAL;
} }
static void ath10k_pci_free_irq(struct ath10k *ar) static void ath10k_pci_free_irq(struct ath10k *ar)
@ -2657,7 +2654,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
/* Try MSI-X */ /* Try MSI-X */
if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) { if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
ar_pci->num_msi_intrs = MSI_NUM_REQUEST; ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1;
ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs, ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
ar_pci->num_msi_intrs); ar_pci->num_msi_intrs);
if (ret > 0) if (ret > 0)
@ -2705,18 +2702,13 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
switch (ar_pci->num_msi_intrs) { switch (ar_pci->num_msi_intrs) {
case 0: case 0:
ath10k_pci_deinit_irq_legacy(ar); ath10k_pci_deinit_irq_legacy(ar);
return 0; break;
case 1:
/* fall-through */
case MSI_NUM_REQUEST:
pci_disable_msi(ar_pci->pdev);
return 0;
default: default:
pci_disable_msi(ar_pci->pdev); pci_disable_msi(ar_pci->pdev);
break;
} }
ath10k_warn(ar, "unknown irq configuration upon deinit\n"); return 0;
return -EINVAL;
} }
static int ath10k_pci_wait_for_target_init(struct ath10k *ar) static int ath10k_pci_wait_for_target_init(struct ath10k *ar)


@ -52,6 +52,9 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
struct ieee80211_tx_info *info; struct ieee80211_tx_info *info;
struct ath10k_skb_cb *skb_cb; struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu; struct sk_buff *msdu;
struct ieee80211_hdr *hdr;
__le16 fc;
bool limit_mgmt_desc = false;
ath10k_dbg(ar, ATH10K_DBG_HTT, ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx completion msdu_id %u discard %d no_ack %d success %d\n", "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
@ -72,14 +75,21 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
return; return;
} }
hdr = (struct ieee80211_hdr *)msdu->data;
fc = hdr->frame_control;
if (unlikely(ieee80211_is_mgmt(fc)) &&
ar->hw_params.max_probe_resp_desc_thres)
limit_mgmt_desc = true;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt); __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
if (htt->num_pending_tx == 0) if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq); wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
skb_cb = ATH10K_SKB_CB(msdu); skb_cb = ATH10K_SKB_CB(msdu);
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->htt.txbuf) if (skb_cb->htt.txbuf)


@ -3917,6 +3917,53 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
return 0; return 0;
} }
static bool
ath10k_wmi_is_host_mem_allocated(struct ath10k *ar,
const struct wlan_host_mem_req **mem_reqs,
u32 num_mem_reqs)
{
u32 req_id, num_units, unit_size, num_unit_info;
u32 pool_size;
int i, j;
bool found;
if (ar->wmi.num_mem_chunks != num_mem_reqs)
return false;
for (i = 0; i < num_mem_reqs; ++i) {
req_id = __le32_to_cpu(mem_reqs[i]->req_id);
num_units = __le32_to_cpu(mem_reqs[i]->num_units);
unit_size = __le32_to_cpu(mem_reqs[i]->unit_size);
num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info);
if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
if (ar->num_active_peers)
num_units = ar->num_active_peers + 1;
else
num_units = ar->max_num_peers + 1;
} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
num_units = ar->max_num_peers + 1;
} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
num_units = ar->max_num_vdevs + 1;
}
found = false;
for (j = 0; j < ar->wmi.num_mem_chunks; j++) {
if (ar->wmi.mem_chunks[j].req_id == req_id) {
pool_size = num_units * round_up(unit_size, 4);
if (ar->wmi.mem_chunks[j].len == pool_size) {
found = true;
break;
}
}
}
if (!found)
return false;
}
return true;
}
static int static int
ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb, ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_svc_rdy_ev_arg *arg) struct wmi_svc_rdy_ev_arg *arg)
@ -3997,6 +4044,7 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
struct wmi_svc_rdy_ev_arg arg = {}; struct wmi_svc_rdy_ev_arg arg = {};
u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
int ret; int ret;
bool allocated;
if (!skb) { if (!skb) {
ath10k_warn(ar, "invalid service ready event skb\n"); ath10k_warn(ar, "invalid service ready event skb\n");
@ -4073,6 +4121,18 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
* and WMI_SERVICE_IRAM_TIDS, etc. * and WMI_SERVICE_IRAM_TIDS, etc.
*/ */
allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs,
num_mem_reqs);
if (allocated)
goto skip_mem_alloc;
/* Either this event is received during boot time or there is a change
* in memory requirement from firmware when compared to last request.
* Free any old memory and do a fresh allocation based on the current
* memory requirement.
*/
ath10k_wmi_free_host_mem(ar);
for (i = 0; i < num_mem_reqs; ++i) { for (i = 0; i < num_mem_reqs; ++i) {
req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id); req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units); num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
@ -4108,6 +4168,7 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
return; return;
} }
skip_mem_alloc:
ath10k_dbg(ar, ATH10K_DBG_WMI, ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n", "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
__le32_to_cpu(arg.min_tx_power), __le32_to_cpu(arg.min_tx_power),
@ -6660,15 +6721,10 @@ int ath10k_wmi_attach(struct ath10k *ar)
return 0; return 0;
} }
void ath10k_wmi_detach(struct ath10k *ar) void ath10k_wmi_free_host_mem(struct ath10k *ar)
{ {
int i; int i;
cancel_work_sync(&ar->svc_rdy_work);
if (ar->svc_rdy_skb)
dev_kfree_skb(ar->svc_rdy_skb);
/* free the host memory chunks requested by firmware */ /* free the host memory chunks requested by firmware */
for (i = 0; i < ar->wmi.num_mem_chunks; i++) { for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
dma_free_coherent(ar->dev, dma_free_coherent(ar->dev,
@ -6679,3 +6735,11 @@ void ath10k_wmi_detach(struct ath10k *ar)
ar->wmi.num_mem_chunks = 0; ar->wmi.num_mem_chunks = 0;
} }
void ath10k_wmi_detach(struct ath10k *ar)
{
cancel_work_sync(&ar->svc_rdy_work);
if (ar->svc_rdy_skb)
dev_kfree_skb(ar->svc_rdy_skb);
}


@ -6067,6 +6067,7 @@ struct ath10k_fw_stats_peer;
int ath10k_wmi_attach(struct ath10k *ar); int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar); void ath10k_wmi_detach(struct ath10k *ar);
void ath10k_wmi_free_host_mem(struct ath10k *ar);
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar); int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar); int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);


@ -1249,7 +1249,8 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0); AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) { if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
AR_SREV_9561(ah)) {
if (is_2g) if (is_2g)
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
@ -1640,7 +1641,8 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
skip_tx_iqcal: skip_tx_iqcal:
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) { if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) { if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) ||
AR_SREV_9561(ah)) {
for (i = 0; i < AR9300_MAX_CHAINS; i++) { for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->rxchainmask & (1 << i))) if (!(ah->rxchainmask & (1 << i)))
continue; continue;


@ -635,6 +635,7 @@ struct ath9k_vif_iter_data {
int nstations; /* number of station vifs */ int nstations; /* number of station vifs */
int nwds; /* number of WDS vifs */ int nwds; /* number of WDS vifs */
int nadhocs; /* number of adhoc vifs */ int nadhocs; /* number of adhoc vifs */
int nocbs; /* number of OCB vifs */
struct ieee80211_vif *primary_sta; struct ieee80211_vif *primary_sta;
}; };


@ -741,8 +741,8 @@ static int read_file_misc(struct seq_file *file, void *data)
i++, (int)(ctx->assigned), iter_data.naps, i++, (int)(ctx->assigned), iter_data.naps,
iter_data.nstations, iter_data.nstations,
iter_data.nmeshes, iter_data.nwds); iter_data.nmeshes, iter_data.nwds);
seq_printf(file, " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n", seq_printf(file, " ADHOC: %i OCB: %i TOTAL: %hi BEACON-VIF: %hi\n",
iter_data.nadhocs, sc->cur_chan->nvifs, iter_data.nadhocs, iter_data.nocbs, sc->cur_chan->nvifs,
sc->nbcnvifs); sc->nbcnvifs);
} }


@ -17,12 +17,8 @@
#include <asm/unaligned.h> #include <asm/unaligned.h>
#include "htc.h" #include "htc.h"
/* identify firmware images */ MODULE_FIRMWARE(HTC_7010_MODULE_FW);
#define FIRMWARE_AR7010_1_1 "htc_7010.fw" MODULE_FIRMWARE(HTC_9271_MODULE_FW);
#define FIRMWARE_AR9271 "htc_9271.fw"
MODULE_FIRMWARE(FIRMWARE_AR7010_1_1);
MODULE_FIRMWARE(FIRMWARE_AR9271);
static struct usb_device_id ath9k_hif_usb_ids[] = { static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */ { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
@ -1080,12 +1076,88 @@ static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
device_unlock(parent); device_unlock(parent);
} }
static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context);
/* taken from iwlwifi */
static int ath9k_hif_request_firmware(struct hif_device_usb *hif_dev,
bool first)
{
char index[8], *chip;
int ret;
if (first) {
if (htc_use_dev_fw) {
hif_dev->fw_minor_index = FIRMWARE_MINOR_IDX_MAX + 1;
sprintf(index, "%s", "dev");
} else {
hif_dev->fw_minor_index = FIRMWARE_MINOR_IDX_MAX;
sprintf(index, "%d", hif_dev->fw_minor_index);
}
} else {
hif_dev->fw_minor_index--;
sprintf(index, "%d", hif_dev->fw_minor_index);
}
/* test for FW 1.3 */
if (MAJOR_VERSION_REQ == 1 && hif_dev->fw_minor_index == 3) {
const char *filename;
if (IS_AR7010_DEVICE(hif_dev->usb_device_id->driver_info))
filename = FIRMWARE_AR7010_1_1;
else
filename = FIRMWARE_AR9271;
/* expected fw locations:
* - htc_9271.fw (stable version 1.3, depricated)
*/
snprintf(hif_dev->fw_name, sizeof(hif_dev->fw_name),
"%s", filename);
} else if (hif_dev->fw_minor_index < FIRMWARE_MINOR_IDX_MIN) {
dev_err(&hif_dev->udev->dev, "no suitable firmware found!\n");
return -ENOENT;
} else {
if (IS_AR7010_DEVICE(hif_dev->usb_device_id->driver_info))
chip = "7010";
else
chip = "9271";
/* expected fw locations:
* - ath9k_htc/htc_9271-1.dev.0.fw (development version)
* - ath9k_htc/htc_9271-1.4.0.fw (stable version)
*/
snprintf(hif_dev->fw_name, sizeof(hif_dev->fw_name),
"%s/htc_%s-%d.%s.0.fw", HTC_FW_PATH,
chip, MAJOR_VERSION_REQ, index);
}
ret = request_firmware_nowait(THIS_MODULE, true, hif_dev->fw_name,
&hif_dev->udev->dev, GFP_KERNEL,
hif_dev, ath9k_hif_usb_firmware_cb);
if (ret) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Async request for firmware %s failed\n",
hif_dev->fw_name);
return ret;
}
dev_info(&hif_dev->udev->dev, "ath9k_htc: Firmware %s requested\n",
hif_dev->fw_name);
return ret;
}
static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context) static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
{ {
struct hif_device_usb *hif_dev = context; struct hif_device_usb *hif_dev = context;
int ret; int ret;
if (!fw) { if (!fw) {
ret = ath9k_hif_request_firmware(hif_dev, false);
if (!ret)
return;
dev_err(&hif_dev->udev->dev, dev_err(&hif_dev->udev->dev,
"ath9k_htc: Failed to get firmware %s\n", "ath9k_htc: Failed to get firmware %s\n",
hif_dev->fw_name); hif_dev->fw_name);
@ -1215,27 +1287,11 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
init_completion(&hif_dev->fw_done); init_completion(&hif_dev->fw_done);
/* Find out which firmware to load */ ret = ath9k_hif_request_firmware(hif_dev, true);
if (ret)
if (IS_AR7010_DEVICE(id->driver_info))
hif_dev->fw_name = FIRMWARE_AR7010_1_1;
else
hif_dev->fw_name = FIRMWARE_AR9271;
ret = request_firmware_nowait(THIS_MODULE, true, hif_dev->fw_name,
&hif_dev->udev->dev, GFP_KERNEL,
hif_dev, ath9k_hif_usb_firmware_cb);
if (ret) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Async request for firmware %s failed\n",
hif_dev->fw_name);
goto err_fw_req; goto err_fw_req;
}
dev_info(&hif_dev->udev->dev, "ath9k_htc: Firmware %s requested\n", return ret;
hif_dev->fw_name);
return 0;
err_fw_req: err_fw_req:
usb_set_intfdata(interface, NULL); usb_set_intfdata(interface, NULL);

View file

@ -17,8 +17,26 @@
#ifndef HTC_USB_H #ifndef HTC_USB_H
#define HTC_USB_H #define HTC_USB_H
/* old firmware images */
#define FIRMWARE_AR7010_1_1 "htc_7010.fw"
#define FIRMWARE_AR9271 "htc_9271.fw"
/* supported Major FW version */
#define MAJOR_VERSION_REQ 1 #define MAJOR_VERSION_REQ 1
#define MINOR_VERSION_REQ 3 #define MINOR_VERSION_REQ 3
/* minimal and maximal supported Minor FW version. */
#define FIRMWARE_MINOR_IDX_MAX 4
#define FIRMWARE_MINOR_IDX_MIN 3
#define HTC_FW_PATH "ath9k_htc"
#define HTC_9271_MODULE_FW HTC_FW_PATH "/htc_9271-" \
__stringify(MAJOR_VERSION_REQ) \
"." __stringify(FIRMWARE_MINOR_IDX_MAX) ".0.fw"
#define HTC_7010_MODULE_FW HTC_FW_PATH "/htc_7010-" \
__stringify(MAJOR_VERSION_REQ) \
"." __stringify(FIRMWARE_MINOR_IDX_MAX) ".0.fw"
extern int htc_use_dev_fw;
#define IS_AR7010_DEVICE(_v) (((_v) == AR9280_USB) || ((_v) == AR9287_USB)) #define IS_AR7010_DEVICE(_v) (((_v) == AR9280_USB) || ((_v) == AR9287_USB))
@ -101,7 +119,8 @@ struct hif_device_usb {
struct usb_anchor reg_in_submitted; struct usb_anchor reg_in_submitted;
struct usb_anchor mgmt_submitted; struct usb_anchor mgmt_submitted;
struct sk_buff *remain_skb; struct sk_buff *remain_skb;
const char *fw_name; char fw_name[32];
int fw_minor_index;
int rx_remain_len; int rx_remain_len;
int rx_pkt_len; int rx_pkt_len;
int rx_transfer_len; int rx_transfer_len;

View file

@ -38,6 +38,10 @@ static int ath9k_ps_enable;
module_param_named(ps_enable, ath9k_ps_enable, int, 0444); module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave"); MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
int htc_use_dev_fw = 0;
module_param_named(use_dev_fw, htc_use_dev_fw, int, 0444);
MODULE_PARM_DESC(use_dev_fw, "Use development FW version");
#ifdef CONFIG_MAC80211_LEDS #ifdef CONFIG_MAC80211_LEDS
int ath9k_htc_led_blink = 1; int ath9k_htc_led_blink = 1;
module_param_named(blink, ath9k_htc_led_blink, int, 0444); module_param_named(blink, ath9k_htc_led_blink, int, 0444);
@ -736,7 +740,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_MESH_POINT); BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_OCB);
hw->wiphy->iface_combinations = &if_comb; hw->wiphy->iface_combinations = &if_comb;
hw->wiphy->n_iface_combinations = 1; hw->wiphy->n_iface_combinations = 1;

View file

@ -1241,6 +1241,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
break; break;
} }
/* fall through */ /* fall through */
case NL80211_IFTYPE_OCB:
case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP:
set |= AR_STA_ID1_STA_AP; set |= AR_STA_ID1_STA_AP;

View file

@ -855,7 +855,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT) | BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_WDS); BIT(NL80211_IFTYPE_WDS) |
BIT(NL80211_IFTYPE_OCB);
if (ath9k_is_chanctx_enabled()) if (ath9k_is_chanctx_enabled())
hw->wiphy->interface_modes |= hw->wiphy->interface_modes |=

View file

@ -938,6 +938,9 @@ static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data,
if (avp->assoc && !iter_data->primary_sta) if (avp->assoc && !iter_data->primary_sta)
iter_data->primary_sta = vif; iter_data->primary_sta = vif;
break; break;
case NL80211_IFTYPE_OCB:
iter_data->nocbs++;
break;
case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_ADHOC:
iter_data->nadhocs++; iter_data->nadhocs++;
if (vif->bss_conf.enable_beacon) if (vif->bss_conf.enable_beacon)
@ -1111,6 +1114,8 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
if (iter_data.nmeshes) if (iter_data.nmeshes)
ah->opmode = NL80211_IFTYPE_MESH_POINT; ah->opmode = NL80211_IFTYPE_MESH_POINT;
else if (iter_data.nocbs)
ah->opmode = NL80211_IFTYPE_OCB;
else if (iter_data.nwds) else if (iter_data.nwds)
ah->opmode = NL80211_IFTYPE_AP; ah->opmode = NL80211_IFTYPE_AP;
else if (iter_data.nadhocs) else if (iter_data.nadhocs)
@ -1760,7 +1765,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
ath9k_calculate_summary_state(sc, avp->chanctx); ath9k_calculate_summary_state(sc, avp->chanctx);
} }
if (changed & BSS_CHANGED_IBSS) { if ((changed & BSS_CHANGED_IBSS) ||
(changed & BSS_CHANGED_OCB)) {
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
common->curaid = bss_conf->aid; common->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc->sc_ah); ath9k_hw_write_associd(sc->sc_ah);

View file

@ -403,7 +403,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
(sc->cur_chan->nvifs <= 1) && (sc->cur_chan->nvifs <= 1) &&
!(sc->cur_chan->rxfilter & FIF_BCN_PRBRESP_PROMISC)) !(sc->cur_chan->rxfilter & FIF_BCN_PRBRESP_PROMISC))
rfilt |= ATH9K_RX_FILTER_MYBEACON; rfilt |= ATH9K_RX_FILTER_MYBEACON;
else else if (sc->sc_ah->opmode != NL80211_IFTYPE_OCB)
rfilt |= ATH9K_RX_FILTER_BEACON; rfilt |= ATH9K_RX_FILTER_BEACON;
if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||

View file

@ -453,7 +453,7 @@ static void carl9170_rx_phy_status(struct ar9170 *ar,
/* post-process RSSI */ /* post-process RSSI */
for (i = 0; i < 7; i++) for (i = 0; i < 7; i++)
if (phy->rssi[i] & 0x80) if (phy->rssi[i] & 0x80)
phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f; phy->rssi[i] = ((~phy->rssi[i] & 0x7f) + 1) & 0x7f;
/* TODO: we could do something with phy_errors */ /* TODO: we could do something with phy_errors */
status->signal = ar->noise[0] + phy->rssi_combined; status->signal = ar->noise[0] + phy->rssi_combined;
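A worked example (illustration only) of the one-character carl9170 fix above: the raw per-chain RSSI byte is a signed two's-complement value, and the old expression did not invert the low bits before adding one, so a negative sample such as 0xf6 (-10) came out as 119 instead of its two's-complement magnitude 10.

	/* Illustration only: effect of the RSSI sign fix on one raw sample. */
	#include <stdio.h>

	int main(void)
	{
		unsigned char rssi = 0xf6;	/* -10 when read as a signed 8-bit value */
		unsigned char before = ((rssi & 0x7f) + 1) & 0x7f;	/* 119 (wrong)  */
		unsigned char after  = ((~rssi & 0x7f) + 1) & 0x7f;	/* 10 (correct) */

		printf("before=%u after=%u\n", before, after);
		return 0;
	}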

View file

@ -21,12 +21,6 @@
#include "dfs_pri_detector.h" #include "dfs_pri_detector.h"
#include "ath.h" #include "ath.h"
/*
* tolerated deviation of radar time stamp in usecs on both sides
* TODO: this might need to be HW-dependent
*/
#define PRI_TOLERANCE 16
/** /**
* struct radar_types - contains array of patterns defined for one DFS domain * struct radar_types - contains array of patterns defined for one DFS domain
* @domain: DFS regulatory domain * @domain: DFS regulatory domain
@ -121,7 +115,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = {
JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false), JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false), JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false), JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20, 50, false), JP_PATTERN(7, 50, 100, 1000, 2000, 1, 3, 50, false),
JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false), JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false),
}; };

View file

@ -21,6 +21,11 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/nl80211.h> #include <linux/nl80211.h>
/* tolerated deviation of radar time stamp in usecs on both sides
* TODO: this might need to be HW-dependent
*/
#define PRI_TOLERANCE 16
/** /**
* struct ath_dfs_pool_stats - DFS Statistics for global pools * struct ath_dfs_pool_stats - DFS Statistics for global pools
*/ */

View file

@ -25,6 +25,9 @@ struct ath_dfs_pool_stats global_dfs_pool_stats = {};
#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++) #define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--) #define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
#define GET_PRI_TO_USE(MIN, MAX, RUNTIME) \
(MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \
MIN + PRI_TOLERANCE : RUNTIME)
/** /**
* struct pulse_elem - elements in pulse queue * struct pulse_elem - elements in pulse queue
@ -243,7 +246,8 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde,
ps.count_falses = 0; ps.count_falses = 0;
ps.first_ts = p->ts; ps.first_ts = p->ts;
ps.last_ts = ts; ps.last_ts = ts;
ps.pri = ts - p->ts; ps.pri = GET_PRI_TO_USE(pde->rs->pri_min,
pde->rs->pri_max, ts - p->ts);
ps.dur = ps.pri * (pde->rs->ppb - 1) ps.dur = ps.pri * (pde->rs->ppb - 1)
+ 2 * pde->rs->max_pri_tolerance; + 2 * pde->rs->max_pri_tolerance;
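A hedged sketch of what GET_PRI_TO_USE() above does with PRI_TOLERANCE == 16: when a detector spec's window is exactly 2 * PRI_TOLERANCE wide it describes a single nominal PRI, so the nominal value is preferred over the jittery measured pulse spacing ts - p->ts; wider windows keep the measured value. The numbers below are illustrative and not taken from a specific radar pattern.

	/* Illustration only: selection between nominal and measured PRI. */
	#include <stdio.h>

	#define PRI_TOLERANCE 16
	#define GET_PRI_TO_USE(MIN, MAX, RUNTIME) \
		(MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \
		 MIN + PRI_TOLERANCE : RUNTIME)

	int main(void)
	{
		/* 317 + 16 == 349 - 16 == 333: fixed-PRI spec, nominal value wins. */
		printf("%d\n", GET_PRI_TO_USE(317, 349, 338));	/* 333 */
		/* 150 + 16 != 230 - 16: ranged spec, measured spacing is kept.    */
		printf("%d\n", GET_PRI_TO_USE(150, 230, 205));	/* 205 */
		return 0;
	}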

View file

@ -272,10 +272,11 @@ brcmf_proto_bcdc_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
} }
static int static int
brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx, brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws,
struct sk_buff *pktbuf) struct sk_buff *pktbuf, struct brcmf_if **ifp)
{ {
struct brcmf_proto_bcdc_header *h; struct brcmf_proto_bcdc_header *h;
struct brcmf_if *tmp_if;
brcmf_dbg(BCDC, "Enter\n"); brcmf_dbg(BCDC, "Enter\n");
@ -289,30 +290,21 @@ brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
trace_brcmf_bcdchdr(pktbuf->data); trace_brcmf_bcdchdr(pktbuf->data);
h = (struct brcmf_proto_bcdc_header *)(pktbuf->data); h = (struct brcmf_proto_bcdc_header *)(pktbuf->data);
*ifidx = BCDC_GET_IF_IDX(h); tmp_if = brcmf_get_ifp(drvr, BCDC_GET_IF_IDX(h));
if (*ifidx >= BRCMF_MAX_IFS) { if (!tmp_if) {
brcmf_err("rx data ifnum out of range (%d)\n", *ifidx); brcmf_dbg(INFO, "no matching ifp found\n");
return -EBADE; return -EBADE;
} }
/* The ifidx is the idx to map to matching netdev/ifp. When receiving
* events this is easy because it contains the bssidx which maps
* 1-on-1 to the netdev/ifp. But for data frames the ifidx is rcvd.
* bssidx 1 is used for p2p0 and no data can be received or
* transmitted on it. Therefor bssidx is ifidx + 1 if ifidx > 0
*/
if (*ifidx)
(*ifidx)++;
if (((h->flags & BCDC_FLAG_VER_MASK) >> BCDC_FLAG_VER_SHIFT) != if (((h->flags & BCDC_FLAG_VER_MASK) >> BCDC_FLAG_VER_SHIFT) !=
BCDC_PROTO_VER) { BCDC_PROTO_VER) {
brcmf_err("%s: non-BCDC packet received, flags 0x%x\n", brcmf_err("%s: non-BCDC packet received, flags 0x%x\n",
brcmf_ifname(drvr, *ifidx), h->flags); brcmf_ifname(drvr, tmp_if->ifidx), h->flags);
return -EBADE; return -EBADE;
} }
if (h->flags & BCDC_FLAG_SUM_GOOD) { if (h->flags & BCDC_FLAG_SUM_GOOD) {
brcmf_dbg(BCDC, "%s: BDC rcv, good checksum, flags 0x%x\n", brcmf_dbg(BCDC, "%s: BDC rcv, good checksum, flags 0x%x\n",
brcmf_ifname(drvr, *ifidx), h->flags); brcmf_ifname(drvr, tmp_if->ifidx), h->flags);
pktbuf->ip_summed = CHECKSUM_UNNECESSARY; pktbuf->ip_summed = CHECKSUM_UNNECESSARY;
} }
@ -320,12 +312,14 @@ brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
skb_pull(pktbuf, BCDC_HEADER_LEN); skb_pull(pktbuf, BCDC_HEADER_LEN);
if (do_fws) if (do_fws)
brcmf_fws_hdrpull(drvr, *ifidx, h->data_offset << 2, pktbuf); brcmf_fws_hdrpull(tmp_if, h->data_offset << 2, pktbuf);
else else
skb_pull(pktbuf, h->data_offset << 2); skb_pull(pktbuf, h->data_offset << 2);
if (pktbuf->len == 0) if (pktbuf->len == 0)
return -ENODATA; return -ENODATA;
*ifp = tmp_if;
return 0; return 0;
} }

View file

@ -149,7 +149,7 @@ static s32 brcmf_btcoex_params_read(struct brcmf_if *ifp, u32 addr, u32 *data)
static void brcmf_btcoex_boost_wifi(struct brcmf_btcoex_info *btci, static void brcmf_btcoex_boost_wifi(struct brcmf_btcoex_info *btci,
bool trump_sco) bool trump_sco)
{ {
struct brcmf_if *ifp = btci->cfg->pub->iflist[0]; struct brcmf_if *ifp = brcmf_get_ifp(btci->cfg->pub, 0);
if (trump_sco && !btci->saved_regs_part2) { if (trump_sco && !btci->saved_regs_part2) {
/* this should reduce eSCO aggressive /* this should reduce eSCO aggressive
@ -468,7 +468,7 @@ int brcmf_btcoex_set_mode(struct brcmf_cfg80211_vif *vif,
{ {
struct brcmf_cfg80211_info *cfg = wiphy_priv(vif->wdev.wiphy); struct brcmf_cfg80211_info *cfg = wiphy_priv(vif->wdev.wiphy);
struct brcmf_btcoex_info *btci = cfg->btcoex; struct brcmf_btcoex_info *btci = cfg->btcoex;
struct brcmf_if *ifp = cfg->pub->iflist[0]; struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0);
switch (mode) { switch (mode) {
case BRCMF_BTCOEX_DISABLED: case BRCMF_BTCOEX_DISABLED:

View file

@ -236,89 +236,6 @@ static int brcmf_roamoff;
module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR); module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR);
MODULE_PARM_DESC(roamoff, "do not use internal roaming engine"); MODULE_PARM_DESC(roamoff, "do not use internal roaming engine");
/* Quarter dBm units to mW
* Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
* Table is offset so the last entry is largest mW value that fits in
* a u16.
*/
#define QDBM_OFFSET 153 /* Offset for first entry */
#define QDBM_TABLE_LEN 40 /* Table size */
/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
* Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
*/
#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
/* Largest mW value that will round down to the last table entry,
* QDBM_OFFSET + QDBM_TABLE_LEN-1.
* Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) +
* mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
*/
#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
static const u16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
};
static u16 brcmf_qdbm_to_mw(u8 qdbm)
{
uint factor = 1;
int idx = qdbm - QDBM_OFFSET;
if (idx >= QDBM_TABLE_LEN)
/* clamp to max u16 mW value */
return 0xFFFF;
/* scale the qdBm index up to the range of the table 0-40
* where an offset of 40 qdBm equals a factor of 10 mW.
*/
while (idx < 0) {
idx += 40;
factor *= 10;
}
/* return the mW value scaled down to the correct factor of 10,
* adding in factor/2 to get proper rounding.
*/
return (nqdBm_to_mW_map[idx] + factor / 2) / factor;
}
static u8 brcmf_mw_to_qdbm(u16 mw)
{
u8 qdbm;
int offset;
uint mw_uint = mw;
uint boundary;
/* handle boundary case */
if (mw_uint <= 1)
return 0;
offset = QDBM_OFFSET;
/* move mw into the range of the table */
while (mw_uint < QDBM_TABLE_LOW_BOUND) {
mw_uint *= 10;
offset -= 40;
}
for (qdbm = 0; qdbm < QDBM_TABLE_LEN - 1; qdbm++) {
boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm + 1] -
nqdBm_to_mW_map[qdbm]) / 2;
if (mw_uint < boundary)
break;
}
qdbm += (u8) offset;
return qdbm;
}
static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
struct cfg80211_chan_def *ch) struct cfg80211_chan_def *ch)
@ -860,6 +777,37 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0; s32 err = 0;
brcmf_dbg(TRACE, "Enter, idx=%d, type=%d\n", ifp->bssidx, type); brcmf_dbg(TRACE, "Enter, idx=%d, type=%d\n", ifp->bssidx, type);
/* WAR: There are a number of p2p interface related problems which
* need to be handled initially (before doing the validate).
* wpa_supplicant tends to do iface changes on p2p device/client/go
* which are not always possible/allowed. However we need to return
* OK otherwise the wpa_supplicant won't start. The situation differs
* on configuration and setup (p2pon=1 module param). The first check
* is to see if the request is a change to station for p2p iface.
*/
if ((type == NL80211_IFTYPE_STATION) &&
((vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) ||
(vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) ||
(vif->wdev.iftype == NL80211_IFTYPE_P2P_DEVICE))) {
brcmf_dbg(TRACE, "Ignoring cmd for p2p if\n");
/* Now depending on whether module param p2pon=1 was used the
* response needs to be either 0 or EOPNOTSUPP. The reason is
* that if p2pon=1 is used, but a newer supplicant is used then
* we should return an error, as this combination won't work.
* In other situations 0 is returned and supplicant will start
* normally. It will give a trace in cfg80211, but it is the
* only way to get it working. Unfortunately this will result
* in a situation where we won't support a new supplicant in
* combination with module param p2pon=1, but that is the way
* it is. If the user tries this then unloading of the driver might
* fail/lock.
*/
if (cfg->p2p.p2pdev_dynamically)
return -EOPNOTSUPP;
else
return 0;
}
err = brcmf_vif_change_validate(wiphy_to_cfg(wiphy), vif, type); err = brcmf_vif_change_validate(wiphy_to_cfg(wiphy), vif, type);
if (err) { if (err) {
brcmf_err("iface validation failed: err=%d\n", err); brcmf_err("iface validation failed: err=%d\n", err);
@ -875,18 +823,6 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
infra = 0; infra = 0;
break; break;
case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_STATION:
/* Ignore change for p2p IF. Unclear why supplicant does this */
if ((vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) ||
(vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) {
brcmf_dbg(TRACE, "Ignoring cmd for p2p if\n");
/* WAR: It is unexpected to get a change of VIF for P2P
* IF, but it happens. The request can not be handled
* but returning EPERM causes a crash. Returning 0
* without setting ieee80211_ptr->iftype causes trace
* (WARN_ON) but it works with wpa_supplicant
*/
return 0;
}
infra = 1; infra = 1;
break; break;
case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP:
@ -2017,16 +1953,14 @@ static s32
brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, s32 mbm) enum nl80211_tx_power_setting type, s32 mbm)
{ {
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg); struct net_device *ndev = cfg_to_ndev(cfg);
struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_if *ifp = netdev_priv(ndev);
u16 txpwrmw; s32 err;
s32 err = 0; s32 disable;
s32 disable = 0; u32 qdbm = 127;
s32 dbm = MBM_TO_DBM(mbm);
brcmf_dbg(TRACE, "Enter\n"); brcmf_dbg(TRACE, "Enter %d %d\n", type, mbm);
if (!check_vif_up(ifp->vif)) if (!check_vif_up(ifp->vif))
return -EIO; return -EIO;
@ -2035,12 +1969,20 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
break; break;
case NL80211_TX_POWER_LIMITED: case NL80211_TX_POWER_LIMITED:
case NL80211_TX_POWER_FIXED: case NL80211_TX_POWER_FIXED:
if (dbm < 0) { if (mbm < 0) {
brcmf_err("TX_POWER_FIXED - dbm is negative\n"); brcmf_err("TX_POWER_FIXED - dbm is negative\n");
err = -EINVAL; err = -EINVAL;
goto done; goto done;
} }
qdbm = MBM_TO_DBM(4 * mbm);
if (qdbm > 127)
qdbm = 127;
qdbm |= WL_TXPWR_OVERRIDE;
break; break;
default:
brcmf_err("Unsupported type %d\n", type);
err = -EINVAL;
goto done;
} }
/* Make sure radio is off or on as far as software is concerned */ /* Make sure radio is off or on as far as software is concerned */
disable = WL_RADIO_SW_DISABLE << 16; disable = WL_RADIO_SW_DISABLE << 16;
@ -2048,52 +1990,44 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
if (err) if (err)
brcmf_err("WLC_SET_RADIO error (%d)\n", err); brcmf_err("WLC_SET_RADIO error (%d)\n", err);
if (dbm > 0xffff) err = brcmf_fil_iovar_int_set(ifp, "qtxpower", qdbm);
txpwrmw = 0xffff;
else
txpwrmw = (u16) dbm;
err = brcmf_fil_iovar_int_set(ifp, "qtxpower",
(s32)brcmf_mw_to_qdbm(txpwrmw));
if (err) if (err)
brcmf_err("qtxpower error (%d)\n", err); brcmf_err("qtxpower error (%d)\n", err);
cfg->conf->tx_power = dbm;
done: done:
brcmf_dbg(TRACE, "Exit\n"); brcmf_dbg(TRACE, "Exit %d (qdbm)\n", qdbm & ~WL_TXPWR_OVERRIDE);
return err; return err;
} }
static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, static s32
struct wireless_dev *wdev, brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
s32 *dbm) s32 *dbm)
{ {
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); struct net_device *ndev = cfg_to_ndev(cfg);
s32 txpwrdbm; struct brcmf_if *ifp = netdev_priv(ndev);
u8 result; s32 qdbm = 0;
s32 err = 0; s32 err;
brcmf_dbg(TRACE, "Enter\n"); brcmf_dbg(TRACE, "Enter\n");
if (!check_vif_up(ifp->vif)) if (!check_vif_up(ifp->vif))
return -EIO; return -EIO;
err = brcmf_fil_iovar_int_get(ifp, "qtxpower", &txpwrdbm); err = brcmf_fil_iovar_int_get(ifp, "qtxpower", &qdbm);
if (err) { if (err) {
brcmf_err("error (%d)\n", err); brcmf_err("error (%d)\n", err);
goto done; goto done;
} }
*dbm = (qdbm & ~WL_TXPWR_OVERRIDE) / 4;
result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
*dbm = (s32) brcmf_qdbm_to_mw(result);
done: done:
brcmf_dbg(TRACE, "Exit\n"); brcmf_dbg(TRACE, "Exit (0x%x %d)\n", qdbm, *dbm);
return err; return err;
} }
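A worked sketch of the new quarter-dBm handling above, assuming cfg80211's MBM_TO_DBM() divides by 100 and that WL_TXPWR_OVERRIDE is a single high flag bit (its exact value is not shown here): a request of 2000 mBm (20 dBm) is written to the "qtxpower" iovar as 80 quarter-dBm and reads back as 20 dBm.

	/* Illustration only: set/get round trip in quarter-dBm units. */
	#include <stdio.h>

	#define MBM_TO_DBM(gain)	((gain) / 100)	/* assumed definition */
	#define WL_TXPWR_OVERRIDE	(1U << 31)	/* assumed flag bit   */

	int main(void)
	{
		int mbm = 2000;				/* 20 dBm requested */
		unsigned int qdbm = MBM_TO_DBM(4 * mbm);/* 80 quarter-dBm   */

		if (qdbm > 127)
			qdbm = 127;
		qdbm |= WL_TXPWR_OVERRIDE;		/* as on the set path */

		printf("readback: %u dBm\n", (qdbm & ~WL_TXPWR_OVERRIDE) / 4);
		return 0;
	}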
static s32 static s32
brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev, brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, bool unicast, bool multicast) u8 key_idx, bool unicast, bool multicast)
{ {
struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_if *ifp = netdev_priv(ndev);
u32 index; u32 index;
@ -4747,7 +4681,8 @@ void brcmf_cfg80211_free_netdev(struct net_device *ndev)
ifp = netdev_priv(ndev); ifp = netdev_priv(ndev);
vif = ifp->vif; vif = ifp->vif;
brcmf_free_vif(vif); if (vif)
brcmf_free_vif(vif);
free_netdev(ndev); free_netdev(ndev);
} }
@ -4983,7 +4918,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
brcmf_dbg(CONN, "AP mode link down\n"); brcmf_dbg(CONN, "AP mode link down\n");
complete(&cfg->vif_disabled); complete(&cfg->vif_disabled);
if (ifp->vif->mbss) if (ifp->vif->mbss)
brcmf_remove_interface(ifp->drvr, ifp->bssidx); brcmf_remove_interface(ifp);
return 0; return 0;
} }
@ -6211,9 +6146,10 @@ static void brcmf_free_wiphy(struct wiphy *wiphy)
} }
struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
struct device *busdev) struct device *busdev,
bool p2pdev_forced)
{ {
struct net_device *ndev = drvr->iflist[0]->ndev; struct net_device *ndev = brcmf_get_ifp(drvr, 0)->ndev;
struct brcmf_cfg80211_info *cfg; struct brcmf_cfg80211_info *cfg;
struct wiphy *wiphy; struct wiphy *wiphy;
struct brcmf_cfg80211_vif *vif; struct brcmf_cfg80211_vif *vif;
@ -6303,7 +6239,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
*cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; *cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
} }
err = brcmf_p2p_attach(cfg); err = brcmf_p2p_attach(cfg, p2pdev_forced);
if (err) { if (err) {
brcmf_err("P2P initilisation failed (%d)\n", err); brcmf_err("P2P initilisation failed (%d)\n", err);
goto wiphy_unreg_out; goto wiphy_unreg_out;
@ -6331,6 +6267,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
priv_out: priv_out:
wl_deinit_priv(cfg); wl_deinit_priv(cfg);
brcmf_free_vif(vif); brcmf_free_vif(vif);
ifp->vif = NULL;
wiphy_out: wiphy_out:
brcmf_free_wiphy(wiphy); brcmf_free_wiphy(wiphy);
return NULL; return NULL;

View file

@ -469,7 +469,8 @@ brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
} }
struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
struct device *busdev); struct device *busdev,
bool p2pdev_forced);
void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg); void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
s32 brcmf_cfg80211_up(struct net_device *ndev); s32 brcmf_cfg80211_up(struct net_device *ndev);
s32 brcmf_cfg80211_down(struct net_device *ndev); s32 brcmf_cfg80211_down(struct net_device *ndev);

View file

@ -101,6 +101,9 @@
/* ARM Cortex M3 core, ID 0x82a */ /* ARM Cortex M3 core, ID 0x82a */
#define BCM4329_CORE_ARM_BASE 0x18002000 #define BCM4329_CORE_ARM_BASE 0x18002000
/* Max possibly supported memory size (limited by IO mapped memory) */
#define BRCMF_CHIP_MAX_MEMSIZE (4 * 1024 * 1024)
#define CORE_SB(base, field) \ #define CORE_SB(base, field) \
(base + SBCONFIGOFF + offsetof(struct sbconfig, field)) (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
#define SBCOREREV(sbidh) \ #define SBCOREREV(sbidh) \
@ -205,6 +208,7 @@ struct sbsocramregs {
}; };
#define SOCRAMREGOFFS(_f) offsetof(struct sbsocramregs, _f) #define SOCRAMREGOFFS(_f) offsetof(struct sbsocramregs, _f)
#define SYSMEMREGOFFS(_f) offsetof(struct sbsocramregs, _f)
#define ARMCR4_CAP (0x04) #define ARMCR4_CAP (0x04)
#define ARMCR4_BANKIDX (0x40) #define ARMCR4_BANKIDX (0x40)
@ -513,6 +517,9 @@ static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
case BCMA_CORE_ARM_CR4: case BCMA_CORE_ARM_CR4:
cpu_found = true; cpu_found = true;
break; break;
case BCMA_CORE_ARM_CA7:
cpu_found = true;
break;
default: default:
break; break;
} }
@ -611,6 +618,29 @@ static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
} }
} }
/** Return the SYS MEM size */
static u32 brcmf_chip_sysmem_ramsize(struct brcmf_core_priv *sysmem)
{
u32 memsize = 0;
u32 coreinfo;
u32 idx;
u32 nb;
u32 banksize;
if (!brcmf_chip_iscoreup(&sysmem->pub))
brcmf_chip_resetcore(&sysmem->pub, 0, 0, 0);
coreinfo = brcmf_chip_core_read32(sysmem, SYSMEMREGOFFS(coreinfo));
nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
for (idx = 0; idx < nb; idx++) {
brcmf_chip_socram_banksize(sysmem, idx, &banksize);
memsize += banksize;
}
return memsize;
}
/** Return the TCM-RAM size of the ARMCR4 core. */ /** Return the TCM-RAM size of the ARMCR4 core. */
static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4) static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
{ {
@ -644,6 +674,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
return 0x198000; return 0x198000;
case BRCM_CC_4335_CHIP_ID: case BRCM_CC_4335_CHIP_ID:
case BRCM_CC_4339_CHIP_ID: case BRCM_CC_4339_CHIP_ID:
case BRCM_CC_4350_CHIP_ID:
case BRCM_CC_4354_CHIP_ID: case BRCM_CC_4354_CHIP_ID:
case BRCM_CC_4356_CHIP_ID: case BRCM_CC_4356_CHIP_ID:
case BRCM_CC_43567_CHIP_ID: case BRCM_CC_43567_CHIP_ID:
@ -652,6 +683,9 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_4358_CHIP_ID: case BRCM_CC_4358_CHIP_ID:
case BRCM_CC_43602_CHIP_ID: case BRCM_CC_43602_CHIP_ID:
return 0x180000; return 0x180000;
case BRCM_CC_4365_CHIP_ID:
case BRCM_CC_4366_CHIP_ID:
return 0x200000;
default: default:
brcmf_err("unknown chip: %s\n", ci->pub.name); brcmf_err("unknown chip: %s\n", ci->pub.name);
break; break;
@ -674,10 +708,28 @@ static int brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
return -EINVAL; return -EINVAL;
} }
} else { } else {
mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_INTERNAL_MEM); mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_SYS_MEM);
mem_core = container_of(mem, struct brcmf_core_priv, pub); if (mem) {
brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize, mem_core = container_of(mem, struct brcmf_core_priv,
&ci->pub.srsize); pub);
ci->pub.ramsize = brcmf_chip_sysmem_ramsize(mem_core);
ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
if (!ci->pub.rambase) {
brcmf_err("RAM base not provided with ARM CA7 core\n");
return -EINVAL;
}
} else {
mem = brcmf_chip_get_core(&ci->pub,
BCMA_CORE_INTERNAL_MEM);
if (!mem) {
brcmf_err("No memory cores found\n");
return -ENOMEM;
}
mem_core = container_of(mem, struct brcmf_core_priv,
pub);
brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
&ci->pub.srsize);
}
} }
brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n", brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n",
ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize, ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize,
@ -687,6 +739,12 @@ static int brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
brcmf_err("RAM size is undetermined\n"); brcmf_err("RAM size is undetermined\n");
return -ENOMEM; return -ENOMEM;
} }
if (ci->pub.ramsize > BRCMF_CHIP_MAX_MEMSIZE) {
brcmf_err("RAM size is incorrect\n");
return -ENOMEM;
}
return 0; return 0;
} }
@ -899,13 +957,22 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
/* assure chip is passive for core access */ /* assure chip is passive for core access */
brcmf_chip_set_passive(&ci->pub); brcmf_chip_set_passive(&ci->pub);
/* Call bus specific reset function now. Cores have been determined
* but further access may require a chip specific reset at this point.
*/
if (ci->ops->reset) {
ci->ops->reset(ci->ctx, &ci->pub);
brcmf_chip_set_passive(&ci->pub);
}
return brcmf_chip_get_raminfo(ci); return brcmf_chip_get_raminfo(ci);
} }
static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id) static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
{ {
struct brcmf_core *core; struct brcmf_core *core;
struct brcmf_core_priv *cr4; struct brcmf_core_priv *cpu;
u32 val; u32 val;
@ -918,10 +985,11 @@ static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
brcmf_chip_coredisable(core, 0, 0); brcmf_chip_coredisable(core, 0, 0);
break; break;
case BCMA_CORE_ARM_CR4: case BCMA_CORE_ARM_CR4:
cr4 = container_of(core, struct brcmf_core_priv, pub); case BCMA_CORE_ARM_CA7:
cpu = container_of(core, struct brcmf_core_priv, pub);
/* clear all IOCTL bits except HALT bit */ /* clear all IOCTL bits except HALT bit */
val = chip->ops->read32(chip->ctx, cr4->wrapbase + BCMA_IOCTL); val = chip->ops->read32(chip->ctx, cpu->wrapbase + BCMA_IOCTL);
val &= ARMCR4_BCMA_IOCTL_CPUHALT; val &= ARMCR4_BCMA_IOCTL_CPUHALT;
brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT, brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
ARMCR4_BCMA_IOCTL_CPUHALT); ARMCR4_BCMA_IOCTL_CPUHALT);
@ -1143,6 +1211,33 @@ static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
return true; return true;
} }
static inline void
brcmf_chip_ca7_set_passive(struct brcmf_chip_priv *chip)
{
struct brcmf_core *core;
brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CA7);
core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
D11_BCMA_IOCTL_PHYCLOCKEN,
D11_BCMA_IOCTL_PHYCLOCKEN,
D11_BCMA_IOCTL_PHYCLOCKEN);
}
static bool brcmf_chip_ca7_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
{
struct brcmf_core *core;
chip->ops->activate(chip->ctx, &chip->pub, rstvec);
/* restore ARM */
core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CA7);
brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
return true;
}
void brcmf_chip_set_passive(struct brcmf_chip *pub) void brcmf_chip_set_passive(struct brcmf_chip *pub)
{ {
struct brcmf_chip_priv *chip; struct brcmf_chip_priv *chip;
@ -1156,8 +1251,16 @@ void brcmf_chip_set_passive(struct brcmf_chip *pub)
brcmf_chip_cr4_set_passive(chip); brcmf_chip_cr4_set_passive(chip);
return; return;
} }
arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
brcmf_chip_cm3_set_passive(chip); if (arm) {
brcmf_chip_ca7_set_passive(chip);
return;
}
arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
if (arm) {
brcmf_chip_cm3_set_passive(chip);
return;
}
} }
bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec) bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
@ -1171,8 +1274,14 @@ bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4); arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
if (arm) if (arm)
return brcmf_chip_cr4_set_active(chip, rstvec); return brcmf_chip_cr4_set_active(chip, rstvec);
arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
if (arm)
return brcmf_chip_ca7_set_active(chip, rstvec);
arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
if (arm)
return brcmf_chip_cm3_set_active(chip);
return brcmf_chip_cm3_set_active(chip); return false;
} }
bool brcmf_chip_sr_capable(struct brcmf_chip *pub) bool brcmf_chip_sr_capable(struct brcmf_chip *pub)

View file

@ -73,6 +73,7 @@ struct brcmf_buscore_ops {
u32 (*read32)(void *ctx, u32 addr); u32 (*read32)(void *ctx, u32 addr);
void (*write32)(void *ctx, u32 addr, u32 value); void (*write32)(void *ctx, u32 addr, u32 value);
int (*prepare)(void *ctx); int (*prepare)(void *ctx);
int (*reset)(void *ctx, struct brcmf_chip *chip);
int (*setup)(void *ctx, struct brcmf_chip *chip); int (*setup)(void *ctx, struct brcmf_chip *chip);
void (*activate)(void *ctx, struct brcmf_chip *chip, u32 rstvec); void (*activate)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
}; };

View file

@ -53,6 +53,8 @@ MODULE_LICENSE("Dual BSD/GPL");
#define BRCMF_RXREORDER_EXPIDX_VALID 0x08 #define BRCMF_RXREORDER_EXPIDX_VALID 0x08
#define BRCMF_RXREORDER_NEW_HOLE 0x10 #define BRCMF_RXREORDER_NEW_HOLE 0x10
#define BRCMF_BSSIDX_INVALID -1
/* Error bits */ /* Error bits */
int brcmf_msg_level; int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR); module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
@ -60,10 +62,8 @@ MODULE_PARM_DESC(debug, "level of debug output");
/* P2P0 enable */ /* P2P0 enable */
static int brcmf_p2p_enable; static int brcmf_p2p_enable;
#ifdef CONFIG_BRCMDBG
module_param_named(p2pon, brcmf_p2p_enable, int, 0); module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality"); MODULE_PARM_DESC(p2pon, "enable legacy p2p management functionality");
#endif
char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx) char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{ {
@ -83,6 +83,24 @@ char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
return "<if_none>"; return "<if_none>";
} }
struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx)
{
struct brcmf_if *ifp;
s32 bssidx;
if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
brcmf_err("ifidx %d out of range\n", ifidx);
return NULL;
}
ifp = NULL;
bssidx = drvr->if2bss[ifidx];
if (bssidx >= 0)
ifp = drvr->iflist[bssidx];
return ifp;
}
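A minimal sketch (not driver code) of the table-driven lookup above, which replaces the old assumption in the BCDC hdrpull path that bssidx is simply ifidx + 1 for ifidx > 0. The interface count and the p2p-client placement below are hypothetical.

	/* Illustration only: firmware ifidx resolved through the if2bss table. */
	#include <stdio.h>

	#define MAX_IFS		16	/* assumed BRCMF_MAX_IFS */
	#define BSSIDX_INVALID	-1

	int main(void)
	{
		int if2bss[MAX_IFS];
		int i;

		for (i = 0; i < MAX_IFS; i++)
			if2bss[i] = BSSIDX_INVALID;

		if2bss[0] = 0;	/* primary wlan interface          */
		if2bss[1] = 2;	/* hypothetical p2p client mapping */

		printf("ifidx 1 -> bssidx %d\n", if2bss[1]);
		printf("ifidx 3 -> bssidx %d (no matching ifp)\n", if2bss[3]);
		return 0;
	}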
static void _brcmf_set_multicast_list(struct work_struct *work) static void _brcmf_set_multicast_list(struct work_struct *work)
{ {
struct brcmf_if *ifp; struct brcmf_if *ifp;
@ -520,17 +538,15 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr; struct brcmf_pub *drvr = bus_if->drvr;
struct brcmf_skb_reorder_data *rd; struct brcmf_skb_reorder_data *rd;
u8 ifidx;
int ret; int ret;
brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb); brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
/* process and remove protocol-specific header */ /* process and remove protocol-specific header */
ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb); ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp);
ifp = drvr->iflist[ifidx];
if (ret || !ifp || !ifp->ndev) { if (ret || !ifp || !ifp->ndev) {
if ((ret != -ENODATA) && ifp) if (ret != -ENODATA && ifp)
ifp->stats.rx_errors++; ifp->stats.rx_errors++;
brcmu_pkt_buf_free_skb(skb); brcmu_pkt_buf_free_skb(skb);
return; return;
@ -543,17 +559,11 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
brcmf_netif_rx(ifp, skb); brcmf_netif_rx(ifp, skb);
} }
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx, void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
bool success)
{ {
struct brcmf_if *ifp;
struct ethhdr *eh; struct ethhdr *eh;
u16 type; u16 type;
ifp = drvr->iflist[ifidx];
if (!ifp)
goto done;
eh = (struct ethhdr *)(txp->data); eh = (struct ethhdr *)(txp->data);
type = ntohs(eh->h_proto); type = ntohs(eh->h_proto);
@ -565,7 +575,7 @@ void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
if (!success) if (!success)
ifp->stats.tx_errors++; ifp->stats.tx_errors++;
done:
brcmu_pkt_buf_free_skb(txp); brcmu_pkt_buf_free_skb(txp);
} }
@ -573,17 +583,17 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{ {
struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr; struct brcmf_pub *drvr = bus_if->drvr;
u8 ifidx; struct brcmf_if *ifp;
/* await txstatus signal for firmware if active */ /* await txstatus signal for firmware if active */
if (brcmf_fws_fc_active(drvr->fws)) { if (brcmf_fws_fc_active(drvr->fws)) {
if (!success) if (!success)
brcmf_fws_bustxfail(drvr->fws, txp); brcmf_fws_bustxfail(drvr->fws, txp);
} else { } else {
if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp)) if (brcmf_proto_hdrpull(drvr, false, txp, &ifp))
brcmu_pkt_buf_free_skb(txp); brcmu_pkt_buf_free_skb(txp);
else else
brcmf_txfinalize(drvr, txp, ifidx, success); brcmf_txfinalize(ifp, txp, success);
} }
} }
@ -708,8 +718,6 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
} }
brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name); brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
ndev->destructor = brcmf_cfg80211_free_netdev;
return 0; return 0;
fail: fail:
@ -719,6 +727,14 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
return -EBADE; return -EBADE;
} }
static void brcmf_net_detach(struct net_device *ndev)
{
if (ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(ndev);
else
brcmf_cfg80211_free_netdev(ndev);
}
static int brcmf_net_p2p_open(struct net_device *ndev) static int brcmf_net_p2p_open(struct net_device *ndev)
{ {
brcmf_dbg(TRACE, "Enter\n"); brcmf_dbg(TRACE, "Enter\n");
@ -778,7 +794,7 @@ static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
} }
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx, struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
char *name, u8 *mac_addr) bool is_p2pdev, char *name, u8 *mac_addr)
{ {
struct brcmf_if *ifp; struct brcmf_if *ifp;
struct net_device *ndev; struct net_device *ndev;
@ -795,8 +811,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
ifp->ndev->name); ifp->ndev->name);
if (ifidx) { if (ifidx) {
netif_stop_queue(ifp->ndev); netif_stop_queue(ifp->ndev);
unregister_netdev(ifp->ndev); brcmf_net_detach(ifp->ndev);
free_netdev(ifp->ndev);
drvr->iflist[bssidx] = NULL; drvr->iflist[bssidx] = NULL;
} else { } else {
brcmf_err("ignore IF event\n"); brcmf_err("ignore IF event\n");
@ -804,7 +819,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
} }
} }
if (!brcmf_p2p_enable && bssidx == 1) { if (!brcmf_p2p_enable && is_p2pdev) {
/* this is P2P_DEVICE interface */ /* this is P2P_DEVICE interface */
brcmf_dbg(INFO, "allocate non-netdev interface\n"); brcmf_dbg(INFO, "allocate non-netdev interface\n");
ifp = kzalloc(sizeof(*ifp), GFP_KERNEL); ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
@ -818,8 +833,12 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
if (!ndev) if (!ndev)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
ndev->destructor = brcmf_cfg80211_free_netdev;
ifp = netdev_priv(ndev); ifp = netdev_priv(ndev);
ifp->ndev = ndev; ifp->ndev = ndev;
/* store mapping ifidx to bssidx */
if (drvr->if2bss[ifidx] == BRCMF_BSSIDX_INVALID)
drvr->if2bss[ifidx] = bssidx;
} }
ifp->drvr = drvr; ifp->drvr = drvr;
@ -850,6 +869,8 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
return; return;
} }
brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx); brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
if (drvr->if2bss[ifp->ifidx] == bssidx)
drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
if (ifp->ndev) { if (ifp->ndev) {
if (bssidx == 0) { if (bssidx == 0) {
if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) { if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
@ -865,17 +886,28 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
cancel_work_sync(&ifp->setmacaddr_work); cancel_work_sync(&ifp->setmacaddr_work);
cancel_work_sync(&ifp->multicast_work); cancel_work_sync(&ifp->multicast_work);
} }
/* unregister will take care of freeing it */ brcmf_net_detach(ifp->ndev);
unregister_netdev(ifp->ndev); } else {
/* Only p2p device interfaces which get dynamically created
* end up here. In this case the p2p module should be informed
* about the removal of the interface within the firmware. If
* not then p2p commands towards the firmware will cause some
* serious troublesome side effects. The p2p module will clean
* up the ifp if needed.
*/
brcmf_p2p_ifp_removed(ifp);
kfree(ifp);
} }
} }
void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx) void brcmf_remove_interface(struct brcmf_if *ifp)
{ {
if (drvr->iflist[bssidx]) { if (!ifp || WARN_ON(ifp->drvr->iflist[ifp->bssidx] != ifp))
brcmf_fws_del_interface(drvr->iflist[bssidx]); return;
brcmf_del_if(drvr, bssidx); brcmf_dbg(TRACE, "Enter, bssidx=%d, ifidx=%d\n", ifp->bssidx,
} ifp->ifidx);
brcmf_fws_del_interface(ifp);
brcmf_del_if(ifp->drvr, ifp->bssidx);
} }
int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr) int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
@ -906,6 +938,7 @@ int brcmf_attach(struct device *dev)
{ {
struct brcmf_pub *drvr = NULL; struct brcmf_pub *drvr = NULL;
int ret = 0; int ret = 0;
int i;
brcmf_dbg(TRACE, "Enter\n"); brcmf_dbg(TRACE, "Enter\n");
@ -914,6 +947,9 @@ int brcmf_attach(struct device *dev)
if (!drvr) if (!drvr)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(drvr->if2bss); i++)
drvr->if2bss[i] = BRCMF_BSSIDX_INVALID;
mutex_init(&drvr->proto_block); mutex_init(&drvr->proto_block);
/* Link to bus module */ /* Link to bus module */
@ -981,12 +1017,12 @@ int brcmf_bus_start(struct device *dev)
brcmf_dbg(TRACE, "\n"); brcmf_dbg(TRACE, "\n");
/* add primary networking interface */ /* add primary networking interface */
ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL); ifp = brcmf_add_if(drvr, 0, 0, false, "wlan%d", NULL);
if (IS_ERR(ifp)) if (IS_ERR(ifp))
return PTR_ERR(ifp); return PTR_ERR(ifp);
if (brcmf_p2p_enable) if (brcmf_p2p_enable)
p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL); p2p_ifp = brcmf_add_if(drvr, 1, 0, false, "p2p%d", NULL);
else else
p2p_ifp = NULL; p2p_ifp = NULL;
if (IS_ERR(p2p_ifp)) if (IS_ERR(p2p_ifp))
@ -1017,7 +1053,8 @@ int brcmf_bus_start(struct device *dev)
brcmf_fws_add_interface(ifp); brcmf_fws_add_interface(ifp);
drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev); drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev,
brcmf_p2p_enable);
if (drvr->config == NULL) { if (drvr->config == NULL) {
ret = -ENOMEM; ret = -ENOMEM;
goto fail; goto fail;
@ -1031,17 +1068,20 @@ int brcmf_bus_start(struct device *dev)
fail: fail:
if (ret < 0) { if (ret < 0) {
brcmf_err("failed: %d\n", ret); brcmf_err("failed: %d\n", ret);
brcmf_cfg80211_detach(drvr->config); if (drvr->config) {
brcmf_cfg80211_detach(drvr->config);
drvr->config = NULL;
}
if (drvr->fws) { if (drvr->fws) {
brcmf_fws_del_interface(ifp); brcmf_fws_del_interface(ifp);
brcmf_fws_deinit(drvr); brcmf_fws_deinit(drvr);
} }
if (drvr->iflist[0]) { if (drvr->iflist[0]) {
free_netdev(ifp->ndev); brcmf_net_detach(ifp->ndev);
drvr->iflist[0] = NULL; drvr->iflist[0] = NULL;
} }
if (p2p_ifp) { if (p2p_ifp) {
free_netdev(p2p_ifp->ndev); brcmf_net_detach(p2p_ifp->ndev);
drvr->iflist[1] = NULL; drvr->iflist[1] = NULL;
} }
return ret; return ret;
@ -1105,7 +1145,7 @@ void brcmf_detach(struct device *dev)
/* make sure primary interface removed last */ /* make sure primary interface removed last */
for (i = BRCMF_MAX_IFS-1; i > -1; i--) for (i = BRCMF_MAX_IFS-1; i > -1; i--)
brcmf_remove_interface(drvr, i); brcmf_remove_interface(drvr->iflist[i]);
brcmf_cfg80211_detach(drvr->config); brcmf_cfg80211_detach(drvr->config);

View file

@ -122,6 +122,7 @@ struct brcmf_pub {
struct mac_address addresses[BRCMF_MAX_IFS]; struct mac_address addresses[BRCMF_MAX_IFS];
struct brcmf_if *iflist[BRCMF_MAX_IFS]; struct brcmf_if *iflist[BRCMF_MAX_IFS];
s32 if2bss[BRCMF_MAX_IFS];
struct mutex proto_block; struct mutex proto_block;
unsigned char proto_buf[BRCMF_DCMD_MAXLEN]; unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
@ -202,16 +203,15 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
/* Return pointer to interface name */ /* Return pointer to interface name */
char *brcmf_ifname(struct brcmf_pub *drvr, int idx); char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx);
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked); int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx, struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
char *name, u8 *mac_addr); bool is_p2pdev, char *name, u8 *mac_addr);
void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx); void brcmf_remove_interface(struct brcmf_if *ifp);
int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr); int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr);
void brcmf_txflowblock_if(struct brcmf_if *ifp, void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state); enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx, void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb); void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
/* Sets dongle media info (drv_version, mac address). */ /* Sets dongle media info (drv_version, mac address). */

View file

@ -37,6 +37,7 @@
#define BRCMF_SDIO_VAL 0x00020000 #define BRCMF_SDIO_VAL 0x00020000
#define BRCMF_MSGBUF_VAL 0x00040000 #define BRCMF_MSGBUF_VAL 0x00040000
#define BRCMF_PCIE_VAL 0x00080000 #define BRCMF_PCIE_VAL 0x00080000
#define BRCMF_FWCON_VAL 0x00100000
/* set default print format */ /* set default print format */
#undef pr_fmt #undef pr_fmt
@ -78,6 +79,7 @@ do { \
#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL) #define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL)
#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL) #define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
#define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL) #define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL)
#define BRCMF_FWCON_ON() (brcmf_msg_level & BRCMF_FWCON_VAL)
#else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */ #else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
@ -90,6 +92,7 @@ do { \
#define BRCMF_GLOM_ON() 0 #define BRCMF_GLOM_ON() 0
#define BRCMF_EVENT_ON() 0 #define BRCMF_EVENT_ON() 0
#define BRCMF_FIL_ON() 0 #define BRCMF_FIL_ON() 0
#define BRCMF_FWCON_ON() 0
#endif /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */ #endif /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */

View file

@ -15,6 +15,7 @@
*/ */
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/module.h>
#include <brcm_hw_ids.h> #include <brcm_hw_ids.h>
#include "core.h" #include "core.h"
@ -23,6 +24,12 @@
#include "fwil.h" #include "fwil.h"
#include "feature.h" #include "feature.h"
/* Module param feature_disable (global for all devices) */
static int brcmf_feature_disable;
module_param_named(feature_disable, brcmf_feature_disable, int, 0);
MODULE_PARM_DESC(feature_disable, "Disable features");
/* /*
* expand feature list to array of feature strings. * expand feature list to array of feature strings.
*/ */
@ -121,7 +128,7 @@ static void brcmf_feat_iovar_int_set(struct brcmf_if *ifp,
void brcmf_feat_attach(struct brcmf_pub *drvr) void brcmf_feat_attach(struct brcmf_pub *drvr)
{ {
struct brcmf_if *ifp = drvr->iflist[0]; struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan"); brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn"); brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
@ -131,6 +138,12 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0); brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_P2P, "p2p"); brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_P2P, "p2p");
if (brcmf_feature_disable) {
brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
ifp->drvr->feat_flags, brcmf_feature_disable);
ifp->drvr->feat_flags &= ~brcmf_feature_disable;
}
/* set chip related quirks */ /* set chip related quirks */
switch (drvr->bus_if->chip) { switch (drvr->bus_if->chip) {
case BRCM_CC_43236_CHIP_ID: case BRCM_CC_43236_CHIP_ID:

View file

@ -221,7 +221,7 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
bus_if = dev_get_drvdata(flow->dev); bus_if = dev_get_drvdata(flow->dev);
drvr = bus_if->drvr; drvr = bus_if->drvr;
ifp = drvr->iflist[ifidx]; ifp = brcmf_get_ifp(drvr, ifidx);
brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked); brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked);
spin_unlock_irqrestore(&flow->block_lock, flags); spin_unlock_irqrestore(&flow->block_lock, flags);

View file

@ -34,7 +34,7 @@ enum ring_status {
}; };
struct brcmf_flowring_ring { struct brcmf_flowring_ring {
u8 hash_id; u16 hash_id;
bool blocked; bool blocked;
enum ring_status status; enum ring_status status;
struct sk_buff_head skblist; struct sk_buff_head skblist;

View file

@ -179,25 +179,28 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
{ {
struct brcmf_if_event *ifevent = data; struct brcmf_if_event *ifevent = data;
struct brcmf_if *ifp; struct brcmf_if *ifp;
bool is_p2pdev;
int err = 0; int err = 0;
brcmf_dbg(EVENT, "action: %u idx: %u bsscfg: %u flags: %u role: %u\n", brcmf_dbg(EVENT, "action: %u idx: %u bsscfg: %u flags: %u role: %u\n",
ifevent->action, ifevent->ifidx, ifevent->bssidx, ifevent->action, ifevent->ifidx, ifevent->bssidx,
ifevent->flags, ifevent->role); ifevent->flags, ifevent->role);
/* The P2P Device interface event must not be ignored /* The P2P Device interface event must not be ignored contrary to what
* contrary to what firmware tells us. The only way to * firmware tells us. Older firmware uses p2p noif, with sta role.
* distinguish the P2P Device is by looking at the ifidx * This should be accepted when p2pdev_setup is ongoing. TDLS setup will
* and bssidx received. * use the same ifevent and should be ignored.
*/ */
if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) && is_p2pdev = ((ifevent->flags & BRCMF_E_IF_FLAG_NOIF) &&
(ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) { (ifevent->role == BRCMF_E_IF_ROLE_P2P_CLIENT ||
((ifevent->role == BRCMF_E_IF_ROLE_STA) &&
(drvr->fweh.p2pdev_setup_ongoing))));
if (!is_p2pdev && (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
brcmf_dbg(EVENT, "event can be ignored\n"); brcmf_dbg(EVENT, "event can be ignored\n");
return; return;
} }
if (ifevent->ifidx >= BRCMF_MAX_IFS) { if (ifevent->ifidx >= BRCMF_MAX_IFS) {
brcmf_err("invalid interface index: %u\n", brcmf_err("invalid interface index: %u\n", ifevent->ifidx);
ifevent->ifidx);
return; return;
} }
@ -207,7 +210,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname, brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname,
emsg->addr); emsg->addr);
ifp = brcmf_add_if(drvr, ifevent->bssidx, ifevent->ifidx, ifp = brcmf_add_if(drvr, ifevent->bssidx, ifevent->ifidx,
emsg->ifname, emsg->addr); is_p2pdev, emsg->ifname, emsg->addr);
if (IS_ERR(ifp)) if (IS_ERR(ifp))
return; return;
brcmf_fws_add_interface(ifp); brcmf_fws_add_interface(ifp);
@ -222,7 +225,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
if (ifp && ifevent->action == BRCMF_E_IF_DEL) if (ifp && ifevent->action == BRCMF_E_IF_DEL)
brcmf_remove_interface(drvr, ifevent->bssidx); brcmf_remove_interface(ifp);
} }
/** /**
@ -297,8 +300,7 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
goto event_free; goto event_free;
} }
if ((event->code == BRCMF_E_TDLS_PEER_EVENT) && if (event->code == BRCMF_E_TDLS_PEER_EVENT)
(emsg.bsscfgidx == 1))
ifp = drvr->iflist[0]; ifp = drvr->iflist[0];
else else
ifp = drvr->iflist[emsg.bsscfgidx]; ifp = drvr->iflist[emsg.bsscfgidx];
@ -314,6 +316,17 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
} }
} }
/**
* brcmf_fweh_p2pdev_setup() - P2P device setup ongoing (or not).
*
* @ifp: ifp on which setup is taking place or finished.
* @ongoing: p2p device setup in progress (or not).
*/
void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing)
{
ifp->drvr->fweh.p2pdev_setup_ongoing = ongoing;
}
/** /**
* brcmf_fweh_attach() - initialize firmware event handling. * brcmf_fweh_attach() - initialize firmware event handling.
* *
@ -335,7 +348,7 @@ void brcmf_fweh_attach(struct brcmf_pub *drvr)
void brcmf_fweh_detach(struct brcmf_pub *drvr) void brcmf_fweh_detach(struct brcmf_pub *drvr)
{ {
struct brcmf_fweh_info *fweh = &drvr->fweh; struct brcmf_fweh_info *fweh = &drvr->fweh;
struct brcmf_if *ifp = drvr->iflist[0]; struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
s8 eventmask[BRCMF_EVENTING_MASK_LEN]; s8 eventmask[BRCMF_EVENTING_MASK_LEN];
if (ifp) { if (ifp) {

View file

@ -230,12 +230,14 @@ typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
/** /**
* struct brcmf_fweh_info - firmware event handling information. * struct brcmf_fweh_info - firmware event handling information.
* *
* @p2pdev_setup_ongoing: P2P device creation in progress.
* @event_work: event worker. * @event_work: event worker.
* @evt_q_lock: lock for event queue protection. * @evt_q_lock: lock for event queue protection.
* @event_q: event queue. * @event_q: event queue.
* @evt_handler: registered event handlers. * @evt_handler: registered event handlers.
*/ */
struct brcmf_fweh_info { struct brcmf_fweh_info {
bool p2pdev_setup_ongoing;
struct work_struct event_work; struct work_struct event_work;
spinlock_t evt_q_lock; spinlock_t evt_q_lock;
struct list_head event_q; struct list_head event_q;
@ -255,6 +257,7 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr,
int brcmf_fweh_activate_events(struct brcmf_if *ifp); int brcmf_fweh_activate_events(struct brcmf_if *ifp);
void brcmf_fweh_process_event(struct brcmf_pub *drvr, void brcmf_fweh_process_event(struct brcmf_pub *drvr,
struct brcmf_event *event_packet); struct brcmf_event *event_packet);
void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing);
static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr, static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
struct sk_buff *skb) struct sk_buff *skb)

View file

@ -972,7 +972,7 @@ static void
brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq, brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq,
u8 if_id) u8 if_id)
{ {
struct brcmf_if *ifp = fws->drvr->iflist[!if_id ? 0 : if_id + 1]; struct brcmf_if *ifp = brcmf_get_ifp(fws->drvr, if_id);
if (WARN_ON(!ifp)) if (WARN_ON(!ifp))
return; return;
@ -1398,7 +1398,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
} }
static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo, static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
struct sk_buff *skb, u8 ifidx, struct sk_buff *skb,
u32 genbit, u16 seq) u32 genbit, u16 seq)
{ {
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac; struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
@ -1448,7 +1448,7 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
struct sk_buff *skb; struct sk_buff *skb;
struct brcmf_skbuff_cb *skcb; struct brcmf_skbuff_cb *skcb;
struct brcmf_fws_mac_descriptor *entry = NULL; struct brcmf_fws_mac_descriptor *entry = NULL;
u8 ifidx; struct brcmf_if *ifp;
brcmf_dbg(DATA, "flags %d\n", flags); brcmf_dbg(DATA, "flags %d\n", flags);
@ -1497,15 +1497,16 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
} }
brcmf_fws_macdesc_return_req_credit(skb); brcmf_fws_macdesc_return_req_credit(skb);
if (brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb)) { ret = brcmf_proto_hdrpull(fws->drvr, false, skb, &ifp);
if (ret) {
brcmu_pkt_buf_free_skb(skb); brcmu_pkt_buf_free_skb(skb);
return -EINVAL; return -EINVAL;
} }
if (!remove_from_hanger) if (!remove_from_hanger)
ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, ifidx, ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb,
genbit, seq); genbit, seq);
if (remove_from_hanger || ret) if (remove_from_hanger || ret)
brcmf_txfinalize(fws->drvr, skb, ifidx, true); brcmf_txfinalize(ifp, skb, true);
return 0; return 0;
} }
@ -1615,11 +1616,10 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
return 0; return 0;
} }
int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
struct sk_buff *skb)
{ {
struct brcmf_skb_reorder_data *rd; struct brcmf_skb_reorder_data *rd;
struct brcmf_fws_info *fws = drvr->fws; struct brcmf_fws_info *fws = ifp->drvr->fws;
u8 *signal_data; u8 *signal_data;
s16 data_len; s16 data_len;
u8 type; u8 type;
@ -1629,20 +1629,20 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
s32 err; s32 err;
brcmf_dbg(HDRS, "enter: ifidx %d, skblen %u, sig %d\n", brcmf_dbg(HDRS, "enter: ifidx %d, skblen %u, sig %d\n",
ifidx, skb->len, signal_len); ifp->ifidx, skb->len, siglen);
WARN_ON(signal_len > skb->len); WARN_ON(siglen > skb->len);
if (!signal_len) if (!siglen)
return 0; return;
/* if flow control disabled, skip to packet data and leave */ /* if flow control disabled, skip to packet data and leave */
if ((!fws) || (!fws->fw_signals)) { if ((!fws) || (!fws->fw_signals)) {
skb_pull(skb, signal_len); skb_pull(skb, siglen);
return 0; return;
} }
fws->stats.header_pulls++; fws->stats.header_pulls++;
data_len = signal_len; data_len = siglen;
signal_data = skb->data; signal_data = skb->data;
status = BRCMF_FWS_RET_OK_NOSCHEDULE; status = BRCMF_FWS_RET_OK_NOSCHEDULE;
@ -1730,14 +1730,12 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
/* signalling processing result does /* signalling processing result does
* not affect the actual ethernet packet. * not affect the actual ethernet packet.
*/ */
skb_pull(skb, signal_len); skb_pull(skb, siglen);
/* this may be a signal-only packet /* this may be a signal-only packet
*/ */
if (skb->len == 0) if (skb->len == 0)
fws->stats.header_only_pkt++; fws->stats.header_only_pkt++;
return 0;
} }
static u8 brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo, static u8 brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
@ -1848,7 +1846,7 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
entry->transit_count--; entry->transit_count--;
if (entry->suppressed) if (entry->suppressed)
entry->suppr_transit_count--; entry->suppr_transit_count--;
brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb); (void)brcmf_proto_hdrpull(fws->drvr, false, skb, NULL);
goto rollback; goto rollback;
} }
@ -1904,7 +1902,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
if (fws->avoid_queueing) { if (fws->avoid_queueing) {
rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb); rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
if (rc < 0) if (rc < 0)
brcmf_txfinalize(drvr, skb, ifp->ifidx, false); brcmf_txfinalize(ifp, skb, false);
return rc; return rc;
} }
@ -1928,7 +1926,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
brcmf_fws_schedule_deq(fws); brcmf_fws_schedule_deq(fws);
} else { } else {
brcmf_err("drop skb: no hanger slot\n"); brcmf_err("drop skb: no hanger slot\n");
brcmf_txfinalize(drvr, skb, ifp->ifidx, false); brcmf_txfinalize(ifp, skb, false);
rc = -ENOMEM; rc = -ENOMEM;
} }
brcmf_fws_unlock(fws); brcmf_fws_unlock(fws);
@ -2008,8 +2006,9 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
ret = brcmf_proto_txdata(drvr, ifidx, 0, skb); ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
brcmf_fws_lock(fws); brcmf_fws_lock(fws);
if (ret < 0) if (ret < 0)
brcmf_txfinalize(drvr, skb, ifidx, brcmf_txfinalize(brcmf_get_ifp(drvr,
false); ifidx),
skb, false);
if (fws->bus_flow_blocked) if (fws->bus_flow_blocked)
break; break;
} }
@ -2117,6 +2116,7 @@ static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
int brcmf_fws_init(struct brcmf_pub *drvr) int brcmf_fws_init(struct brcmf_pub *drvr)
{ {
struct brcmf_fws_info *fws; struct brcmf_fws_info *fws;
struct brcmf_if *ifp;
u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS; u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
int rc; int rc;
u32 mode; u32 mode;
@ -2176,21 +2176,22 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
* continue. Set mode back to none indicating not enabled. * continue. Set mode back to none indicating not enabled.
*/ */
fws->fw_signals = true; fws->fw_signals = true;
if (brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv)) { ifp = brcmf_get_ifp(drvr, 0);
if (brcmf_fil_iovar_int_set(ifp, "tlv", tlv)) {
brcmf_err("failed to set bdcv2 tlv signaling\n"); brcmf_err("failed to set bdcv2 tlv signaling\n");
fws->fcmode = BRCMF_FWS_FCMODE_NONE; fws->fcmode = BRCMF_FWS_FCMODE_NONE;
fws->fw_signals = false; fws->fw_signals = false;
} }
if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1)) if (brcmf_fil_iovar_int_set(ifp, "ampdu_hostreorder", 1))
brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n"); brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n");
/* Enable seq number reuse, if supported */ /* Enable seq number reuse, if supported */
if (brcmf_fil_iovar_int_get(drvr->iflist[0], "wlfc_mode", &mode) == 0) { if (brcmf_fil_iovar_int_get(ifp, "wlfc_mode", &mode) == 0) {
if (BRCMF_FWS_MODE_GET_REUSESEQ(mode)) { if (BRCMF_FWS_MODE_GET_REUSESEQ(mode)) {
mode = 0; mode = 0;
BRCMF_FWS_MODE_SET_REUSESEQ(mode, 1); BRCMF_FWS_MODE_SET_REUSESEQ(mode, 1);
if (brcmf_fil_iovar_int_set(drvr->iflist[0], if (brcmf_fil_iovar_int_set(ifp,
"wlfc_mode", mode) == 0) { "wlfc_mode", mode) == 0) {
BRCMF_FWS_MODE_SET_REUSESEQ(fws->mode, 1); BRCMF_FWS_MODE_SET_REUSESEQ(fws->mode, 1);
} }
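
Throughout fwsignal.c the open-coded lookup drvr->iflist[!if_id ? 0 : if_id + 1] and the ifidx-based brcmf_txfinalize() calls are replaced by a brcmf_get_ifp() accessor plus an ifp-based brcmf_txfinalize(), so the firmware-index-to-array-slot mapping lives in one place. The standalone sketch below assumes the slot shift implied by the removed expression (slot 1 reserved for the P2P device); the real brcmf_get_ifp() lives in core.c and is not part of this excerpt.

#include <stdio.h>
#include <stddef.h>

#define BRCMF_MAX_IFS 16

struct brcmf_if_model { int ifidx; const char *name; };

struct brcmf_pub_model {
    struct brcmf_if_model *iflist[BRCMF_MAX_IFS];
};

/*
 * Centralised ifidx -> iflist slot lookup.  The mapping mirrors the
 * expression removed from fwsignal.c: firmware interface index 0 lives
 * in slot 0, every other index is shifted by one because slot 1 is
 * reserved for the P2P device.  Out-of-range indices return NULL
 * instead of indexing past the array.
 */
static struct brcmf_if_model *get_ifp(struct brcmf_pub_model *drvr, int ifidx)
{
    int slot = ifidx ? ifidx + 1 : 0;

    if (ifidx < 0 || slot >= BRCMF_MAX_IFS)
        return NULL;
    return drvr->iflist[slot];
}

int main(void)
{
    struct brcmf_if_model wlan0 = { 0, "wlan0" };
    struct brcmf_if_model p2p1  = { 1, "p2p-wlan0-0" };
    struct brcmf_pub_model drvr = { .iflist = { [0] = &wlan0, [2] = &p2p1 } };

    for (int ifidx = 0; ifidx < 3; ifidx++) {
        struct brcmf_if_model *ifp = get_ifp(&drvr, ifidx);

        printf("ifidx %d -> %s\n", ifidx, ifp ? ifp->name : "(none)");
    }
    return 0;
}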

View file

@ -21,8 +21,7 @@
int brcmf_fws_init(struct brcmf_pub *drvr); int brcmf_fws_init(struct brcmf_pub *drvr);
void brcmf_fws_deinit(struct brcmf_pub *drvr); void brcmf_fws_deinit(struct brcmf_pub *drvr);
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws); bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len, void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb);
struct sk_buff *skb);
int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb); int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_fws_reset_interface(struct brcmf_if *ifp); void brcmf_fws_reset_interface(struct brcmf_if *ifp);

View file

@ -522,7 +522,7 @@ static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws, static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
u8 *ifidx, struct sk_buff *skb) struct sk_buff *skb, struct brcmf_if **ifp)
{ {
return -ENODEV; return -ENODEV;
} }
@ -873,7 +873,11 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
commonring = msgbuf->flowrings[flowid]; commonring = msgbuf->flowrings[flowid];
atomic_dec(&commonring->outstanding_tx); atomic_dec(&commonring->outstanding_tx);
brcmf_txfinalize(msgbuf->drvr, skb, tx_status->msg.ifidx, true); /* Hante: i believe this was a bug as tx_status->msg.ifidx was used
* in brcmf_txfinalize as index in drvr->iflist. Can you confirm/deny?
*/
brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
skb, true);
} }
@ -1081,15 +1085,7 @@ brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
{ {
struct brcmf_if *ifp; struct brcmf_if *ifp;
/* The ifidx is the idx to map to matching netdev/ifp. When receiving ifp = brcmf_get_ifp(msgbuf->drvr, ifidx);
* events this is easy because it contains the bssidx which maps
* 1-on-1 to the netdev/ifp. But for data frames the ifidx is rcvd.
* bssidx 1 is used for p2p0 and no data can be received or
* transmitted on it. Therefor bssidx is ifidx + 1 if ifidx > 0
*/
if (ifidx)
(ifidx)++;
ifp = msgbuf->drvr->iflist[ifidx];
if (!ifp || !ifp->ndev) { if (!ifp || !ifp->ndev) {
brcmf_err("Received pkt for invalid ifidx %d\n", ifidx); brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
brcmu_pkt_buf_free_skb(skb); brcmu_pkt_buf_free_skb(skb);

View file

@ -2084,11 +2084,13 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr); brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
brcmf_cfg80211_arm_vif_event(p2p->cfg, p2p_vif); brcmf_cfg80211_arm_vif_event(p2p->cfg, p2p_vif);
brcmf_fweh_p2pdev_setup(pri_ifp, true);
/* Initialize P2P Discovery in the firmware */ /* Initialize P2P Discovery in the firmware */
err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1); err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
if (err < 0) { if (err < 0) {
brcmf_err("set p2p_disc error\n"); brcmf_err("set p2p_disc error\n");
brcmf_fweh_p2pdev_setup(pri_ifp, false);
brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL); brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL);
goto fail; goto fail;
} }
@ -2097,6 +2099,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
err = brcmf_cfg80211_wait_vif_event_timeout(p2p->cfg, BRCMF_E_IF_ADD, err = brcmf_cfg80211_wait_vif_event_timeout(p2p->cfg, BRCMF_E_IF_ADD,
msecs_to_jiffies(1500)); msecs_to_jiffies(1500));
brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL); brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL);
brcmf_fweh_p2pdev_setup(pri_ifp, false);
if (!err) { if (!err) {
brcmf_err("timeout occurred\n"); brcmf_err("timeout occurred\n");
err = -EIO; err = -EIO;
@ -2130,20 +2133,6 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
return ERR_PTR(err); return ERR_PTR(err);
} }
/**
* brcmf_p2p_delete_p2pdev() - delete P2P_DEVICE virtual interface.
*
* @vif: virtual interface object to delete.
*/
static void brcmf_p2p_delete_p2pdev(struct brcmf_p2p_info *p2p,
struct brcmf_cfg80211_vif *vif)
{
cfg80211_unregister_wdev(&vif->wdev);
p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
brcmf_remove_interface(vif->ifp->drvr, vif->ifp->bssidx);
brcmf_free_vif(vif);
}
/** /**
* brcmf_p2p_add_vif() - create a new P2P virtual interface. * brcmf_p2p_add_vif() - create a new P2P virtual interface.
* *
@ -2255,6 +2244,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
brcmf_dbg(TRACE, "delete P2P vif\n"); brcmf_dbg(TRACE, "delete P2P vif\n");
vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
brcmf_cfg80211_arm_vif_event(cfg, vif);
switch (vif->wdev.iftype) { switch (vif->wdev.iftype) {
case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_CLIENT:
if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state)) if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
@ -2267,10 +2257,10 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
break; break;
case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_P2P_DEVICE:
if (!p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
return 0;
brcmf_p2p_cancel_remain_on_channel(vif->ifp); brcmf_p2p_cancel_remain_on_channel(vif->ifp);
brcmf_p2p_deinit_discovery(p2p); brcmf_p2p_deinit_discovery(p2p);
brcmf_p2p_delete_p2pdev(p2p, vif);
return 0;
default: default:
return -ENOTSUPP; return -ENOTSUPP;
} }
@ -2282,10 +2272,11 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
wait_for_completion_timeout(&cfg->vif_disabled, wait_for_completion_timeout(&cfg->vif_disabled,
msecs_to_jiffies(500)); msecs_to_jiffies(500));
brcmf_vif_clear_mgmt_ies(vif); err = 0;
if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) {
brcmf_cfg80211_arm_vif_event(cfg, vif); brcmf_vif_clear_mgmt_ies(vif);
err = brcmf_p2p_release_p2p_if(vif); err = brcmf_p2p_release_p2p_if(vif);
}
if (!err) { if (!err) {
/* wait for firmware event */ /* wait for firmware event */
err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_DEL, err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_DEL,
@ -2295,12 +2286,31 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
else else
err = 0; err = 0;
} }
if (err)
brcmf_remove_interface(vif->ifp);
brcmf_cfg80211_arm_vif_event(cfg, NULL); brcmf_cfg80211_arm_vif_event(cfg, NULL);
p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL; if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE)
p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
return err; return err;
} }
void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
{
struct brcmf_cfg80211_info *cfg;
struct brcmf_cfg80211_vif *vif;
brcmf_dbg(INFO, "P2P: device interface removed\n");
vif = ifp->vif;
cfg = wdev_to_cfg(&vif->wdev);
cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
rtnl_lock();
cfg80211_unregister_wdev(&vif->wdev);
rtnl_unlock();
brcmf_free_vif(vif);
}
int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev) int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev)
{ {
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
@ -2324,11 +2334,19 @@ void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev)
struct brcmf_cfg80211_vif *vif; struct brcmf_cfg80211_vif *vif;
vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
mutex_lock(&cfg->usr_sync); /* This call can be result of the unregister_wdev call. In that case
(void)brcmf_p2p_deinit_discovery(p2p); * we dont want to do anything anymore. Just return. The config vif
brcmf_abort_scanning(cfg); * will have been cleared at this point.
clear_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state); */
mutex_unlock(&cfg->usr_sync); if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif == vif) {
mutex_lock(&cfg->usr_sync);
/* Set the discovery state to SCAN */
(void)brcmf_p2p_set_discover_state(vif->ifp,
WL_P2P_DISC_ST_SCAN, 0, 0);
brcmf_abort_scanning(cfg);
clear_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state);
mutex_unlock(&cfg->usr_sync);
}
} }
/** /**
@ -2336,7 +2354,7 @@ void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev)
* *
* @cfg: driver private data for cfg80211 interface. * @cfg: driver private data for cfg80211 interface.
*/ */
s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg) s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)
{ {
struct brcmf_if *pri_ifp; struct brcmf_if *pri_ifp;
struct brcmf_if *p2p_ifp; struct brcmf_if *p2p_ifp;
@ -2351,11 +2369,15 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
drvr = cfg->pub; drvr = cfg->pub;
pri_ifp = drvr->iflist[0]; pri_ifp = brcmf_get_ifp(drvr, 0);
p2p_ifp = drvr->iflist[1];
p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif; p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
if (p2pdev_forced) {
p2p_ifp = drvr->iflist[1];
} else {
p2p_ifp = NULL;
p2p->p2pdev_dynamically = true;
}
if (p2p_ifp) { if (p2p_ifp) {
p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE, p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE,
false); false);
@ -2377,6 +2399,8 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN); memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr); brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
brcmf_fweh_p2pdev_setup(pri_ifp, true);
/* Initialize P2P Discovery in the firmware */ /* Initialize P2P Discovery in the firmware */
err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1); err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
if (err < 0) { if (err < 0) {
@ -2403,8 +2427,9 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler); INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
init_completion(&p2p->afx_hdl.act_frm_scan); init_completion(&p2p->afx_hdl.act_frm_scan);
init_completion(&p2p->wait_next_af); init_completion(&p2p->wait_next_af);
}
exit: exit:
brcmf_fweh_p2pdev_setup(pri_ifp, false);
}
return err; return err;
} }
@ -2421,10 +2446,7 @@ void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
if (vif != NULL) { if (vif != NULL) {
brcmf_p2p_cancel_remain_on_channel(vif->ifp); brcmf_p2p_cancel_remain_on_channel(vif->ifp);
brcmf_p2p_deinit_discovery(p2p); brcmf_p2p_deinit_discovery(p2p);
/* remove discovery interface */ brcmf_remove_interface(vif->ifp);
rtnl_lock();
brcmf_p2p_delete_p2pdev(p2p, vif);
rtnl_unlock();
} }
/* just set it all to zero */ /* just set it all to zero */
memset(p2p, 0, sizeof(*p2p)); memset(p2p, 0, sizeof(*p2p));
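
brcmf_p2p_del_vif() now arms the vif-event machinery before the interface-type switch, waits for the firmware BRCMF_E_IF_DEL event, and only removes the interface by hand when that event never arrives; for the P2P_DEVICE case the wdev is later unregistered under rtnl_lock by the new brcmf_p2p_ifp_removed(). The outline below compiles on its own and shows only the ordering; every helper is a stub and the 500 ms figure is a placeholder, not the driver's timeout.

#include <stdbool.h>
#include <stdio.h>

/* Stubbed helpers: the real ones live in the driver. */
static void arm_vif_event(const char *who)    { printf("arm vif event for %s\n", who); }
static void disarm_vif_event(void)            { printf("disarm vif event\n"); }
static int  release_p2p_if(void)              { return 0; /* ask firmware to delete */ }
static bool wait_if_del_event(int timeout_ms) { (void)timeout_ms; return true; }
static void remove_interface(const char *who) { printf("manual removal of %s\n", who); }

/* Outline of the reworked deletion ordering for a P2P vif. */
static int del_vif(const char *name)
{
    int err;

    arm_vif_event(name);              /* armed before any teardown step */

    err = release_p2p_if();           /* tell firmware to drop the interface */
    if (!err && !wait_if_del_event(500))
        err = -1;                     /* no BRCMF_E_IF_DEL arrived in time */

    if (err)
        remove_interface(name);       /* fall back to manual cleanup */

    disarm_vif_event();               /* always disarmed on the way out */
    return err;
}

int main(void)
{
    return del_vif("p2p-wlan0-1") ? 1 : 0;
}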

View file

@ -124,6 +124,7 @@ struct afx_hdl {
* @wait_next_af: thread synchronizing struct. * @wait_next_af: thread synchronizing struct.
* @gon_req_action: about to send go negotiation request frame. * @gon_req_action: about to send go negotiation request frame.
* @block_gon_req_tx: drop tx go negotiation request frame. * @block_gon_req_tx: drop tx go negotiation request frame.
* @p2pdev_dynamically: true if the P2P device interface is created dynamically by user space rather than at probe via module parameter.
*/ */
struct brcmf_p2p_info { struct brcmf_p2p_info {
struct brcmf_cfg80211_info *cfg; struct brcmf_cfg80211_info *cfg;
@ -144,9 +145,10 @@ struct brcmf_p2p_info {
struct completion wait_next_af; struct completion wait_next_af;
bool gon_req_action; bool gon_req_action;
bool block_gon_req_tx; bool block_gon_req_tx;
bool p2pdev_dynamically;
}; };
s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg); s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced);
void brcmf_p2p_detach(struct brcmf_p2p_info *p2p); void brcmf_p2p_detach(struct brcmf_p2p_info *p2p);
struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
unsigned char name_assign_type, unsigned char name_assign_type,
@ -155,6 +157,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev); int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
enum brcmf_fil_p2p_if_types if_type); enum brcmf_fil_p2p_if_types if_type);
void brcmf_p2p_ifp_removed(struct brcmf_if *ifp);
int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev); int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev); void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
int brcmf_p2p_scan_prep(struct wiphy *wiphy, int brcmf_p2p_scan_prep(struct wiphy *wiphy,

View file

@ -47,12 +47,18 @@ enum brcmf_pcie_state {
#define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin" #define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin"
#define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt" #define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt"
#define BRCMF_PCIE_4350_FW_NAME "brcm/brcmfmac4350-pcie.bin"
#define BRCMF_PCIE_4350_NVRAM_NAME "brcm/brcmfmac4350-pcie.txt"
#define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin" #define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin"
#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt" #define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin" #define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
#define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt" #define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt"
#define BRCMF_PCIE_4358_FW_NAME "brcm/brcmfmac4358-pcie.bin" #define BRCMF_PCIE_4358_FW_NAME "brcm/brcmfmac4358-pcie.bin"
#define BRCMF_PCIE_4358_NVRAM_NAME "brcm/brcmfmac4358-pcie.txt" #define BRCMF_PCIE_4358_NVRAM_NAME "brcm/brcmfmac4358-pcie.txt"
#define BRCMF_PCIE_4365_FW_NAME "brcm/brcmfmac4365b-pcie.bin"
#define BRCMF_PCIE_4365_NVRAM_NAME "brcm/brcmfmac4365b-pcie.txt"
#define BRCMF_PCIE_4366_FW_NAME "brcm/brcmfmac4366b-pcie.bin"
#define BRCMF_PCIE_4366_NVRAM_NAME "brcm/brcmfmac4366b-pcie.txt"
#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */ #define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
@ -74,6 +80,8 @@ enum brcmf_pcie_state {
#define BRCMF_PCIE_REG_INTMASK 0x94 #define BRCMF_PCIE_REG_INTMASK 0x94
#define BRCMF_PCIE_REG_SBMBX 0x98 #define BRCMF_PCIE_REG_SBMBX 0x98
#define BRCMF_PCIE_REG_LINK_STATUS_CTRL 0xBC
#define BRCMF_PCIE_PCIE2REG_INTMASK 0x24 #define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
#define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48 #define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
@ -192,12 +200,18 @@ enum brcmf_pcie_state {
MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME); MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME); MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4350_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4350_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME); MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME); MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME); MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME); MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME); MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME); MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4365_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4365_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4366_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4366_NVRAM_NAME);
struct brcmf_pcie_console { struct brcmf_pcie_console {
@ -466,6 +480,7 @@ brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo) static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
{ {
struct brcmf_core *core;
u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD, u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
BRCMF_PCIE_CFGREG_PM_CSR, BRCMF_PCIE_CFGREG_PM_CSR,
BRCMF_PCIE_CFGREG_MSI_CAP, BRCMF_PCIE_CFGREG_MSI_CAP,
@ -484,32 +499,38 @@ static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
if (!devinfo->ci) if (!devinfo->ci)
return; return;
/* Disable ASPM */
brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL); &lsc);
lsc = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB); val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, val); pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
val);
/* Watchdog reset */
brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON); brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
WRITECC32(devinfo, watchdog, 4); WRITECC32(devinfo, watchdog, 4);
msleep(100); msleep(100);
/* Restore ASPM */
brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL); lsc);
brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, lsc);
brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) { if (core->rev <= 13) {
brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
cfg_offset[i]); brcmf_pcie_write_reg32(devinfo,
val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
BRCMF_PCIE_PCIE2REG_CONFIGDATA); cfg_offset[i]);
brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n", val = brcmf_pcie_read_reg32(devinfo,
cfg_offset[i], val); BRCMF_PCIE_PCIE2REG_CONFIGDATA);
brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
val); cfg_offset[i], val);
brcmf_pcie_write_reg32(devinfo,
BRCMF_PCIE_PCIE2REG_CONFIGDATA,
val);
}
} }
} }
@ -519,8 +540,6 @@ static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
u32 config; u32 config;
brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0)
brcmf_pcie_reset_device(devinfo);
/* BAR1 window may not be sized properly */ /* BAR1 window may not be sized properly */
brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0); brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
@ -644,7 +663,7 @@ static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET; addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr); console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
brcmf_dbg(PCIE, "Console: base %x, buf %x, size %d\n", brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
console->base_addr, console->buf_addr, console->bufsize); console->base_addr, console->buf_addr, console->bufsize);
} }
@ -656,6 +675,9 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
u8 ch; u8 ch;
u32 newidx; u32 newidx;
if (!BRCMF_FWCON_ON())
return;
console = &devinfo->shared.console; console = &devinfo->shared.console;
addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET; addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
newidx = brcmf_pcie_read_tcm32(devinfo, addr); newidx = brcmf_pcie_read_tcm32(devinfo, addr);
@ -677,7 +699,7 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
} }
if (ch == '\n') { if (ch == '\n') {
console->log_str[console->log_idx] = 0; console->log_str[console->log_idx] = 0;
brcmf_dbg(PCIE, "CONSOLE: %s", console->log_str); pr_debug("CONSOLE: %s", console->log_str);
console->log_idx = 0; console->log_idx = 0;
} }
} }
@ -1408,6 +1430,10 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
fw_name = BRCMF_PCIE_43602_FW_NAME; fw_name = BRCMF_PCIE_43602_FW_NAME;
nvram_name = BRCMF_PCIE_43602_NVRAM_NAME; nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
break; break;
case BRCM_CC_4350_CHIP_ID:
fw_name = BRCMF_PCIE_4350_FW_NAME;
nvram_name = BRCMF_PCIE_4350_NVRAM_NAME;
break;
case BRCM_CC_4356_CHIP_ID: case BRCM_CC_4356_CHIP_ID:
fw_name = BRCMF_PCIE_4356_FW_NAME; fw_name = BRCMF_PCIE_4356_FW_NAME;
nvram_name = BRCMF_PCIE_4356_NVRAM_NAME; nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
@ -1422,6 +1448,14 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
fw_name = BRCMF_PCIE_4358_FW_NAME; fw_name = BRCMF_PCIE_4358_FW_NAME;
nvram_name = BRCMF_PCIE_4358_NVRAM_NAME; nvram_name = BRCMF_PCIE_4358_NVRAM_NAME;
break; break;
case BRCM_CC_4365_CHIP_ID:
fw_name = BRCMF_PCIE_4365_FW_NAME;
nvram_name = BRCMF_PCIE_4365_NVRAM_NAME;
break;
case BRCM_CC_4366_CHIP_ID:
fw_name = BRCMF_PCIE_4366_FW_NAME;
nvram_name = BRCMF_PCIE_4366_NVRAM_NAME;
break;
default: default:
brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip); brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
return -ENODEV; return -ENODEV;
@ -1633,6 +1667,23 @@ static int brcmf_pcie_buscoreprep(void *ctx)
} }
static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
{
struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
u32 val;
devinfo->ci = chip;
brcmf_pcie_reset_device(devinfo);
val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
if (val != 0xffffffff)
brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
val);
return 0;
}
static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip, static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
u32 rstvec) u32 rstvec)
{ {
@ -1644,6 +1695,7 @@ static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = { static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
.prepare = brcmf_pcie_buscoreprep, .prepare = brcmf_pcie_buscoreprep,
.reset = brcmf_pcie_buscore_reset,
.activate = brcmf_pcie_buscore_activate, .activate = brcmf_pcie_buscore_activate,
.read32 = brcmf_pcie_buscore_read32, .read32 = brcmf_pcie_buscore_read32,
.write32 = brcmf_pcie_buscore_write32, .write32 = brcmf_pcie_buscore_write32,
@ -1811,7 +1863,6 @@ brcmf_pcie_remove(struct pci_dev *pdev)
brcmf_pcie_intr_disable(devinfo); brcmf_pcie_intr_disable(devinfo);
brcmf_detach(&pdev->dev); brcmf_detach(&pdev->dev);
brcmf_pcie_reset_device(devinfo);
kfree(bus->bus_priv.pcie); kfree(bus->bus_priv.pcie);
kfree(bus->msgbuf->flowrings); kfree(bus->msgbuf->flowrings);
@ -1929,6 +1980,7 @@ static int brcmf_pcie_resume(struct pci_dev *pdev)
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
static struct pci_device_id brcmf_pcie_devid_table[] = { static struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
@ -1937,6 +1989,12 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
{ /* end: all zeroes */ } { /* end: all zeroes */ }
}; };
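
brcmf_pcie_reset_device() now toggles ASPM through the standard Link Status/Control config register (offset 0xBC) using pci_read_config_dword()/pci_write_config_dword() instead of the indirect CONFIGADDR/CONFIGDATA window, and restricts the config-register save/restore loop to PCIe core revisions <= 13. The sketch below models only the clear-ASPM/reset/restore sequence on a fake config space; the ASPM bit mask shown is illustrative.

#include <stdint.h>
#include <stdio.h>

#define REG_LINK_STATUS_CTRL 0xBC    /* BRCMF_PCIE_REG_LINK_STATUS_CTRL */
#define LINK_CTRL_ASPM_ENAB  0x3u    /* illustrative mask for the ASPM enable bits */

/* Fake 4 KiB config space standing in for pci_{read,write}_config_dword(). */
static uint32_t cfg_space[4096 / 4];

static uint32_t cfg_read(uint16_t off)              { return cfg_space[off / 4]; }
static void     cfg_write(uint16_t off, uint32_t v) { cfg_space[off / 4] = v; }

static void reset_device(int pcie_core_rev)
{
    uint32_t lsc, val;

    /* Disable ASPM around the watchdog reset ... */
    lsc = cfg_read(REG_LINK_STATUS_CTRL);
    val = lsc & ~LINK_CTRL_ASPM_ENAB;
    cfg_write(REG_LINK_STATUS_CTRL, val);

    /* ... (watchdog reset + delay would happen here) ... */

    /* ... then restore the saved value. */
    cfg_write(REG_LINK_STATUS_CTRL, lsc);

    /* The hunk limits the config-register save/restore loop to rev <= 13. */
    if (pcie_core_rev <= 13)
        printf("rev %d: restoring saved config registers\n", pcie_core_rev);
    else
        printf("rev %d: config register restore skipped\n", pcie_core_rev);
}

int main(void)
{
    cfg_space[REG_LINK_STATUS_CTRL / 4] = 0x00000043; /* pretend ASPM L0s+L1 on */
    reset_device(13);
    reset_device(14);
    return 0;
}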

View file

@ -24,8 +24,8 @@ enum proto_addr_mode {
struct brcmf_proto { struct brcmf_proto {
int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx, int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws,
struct sk_buff *skb); struct sk_buff *skb, struct brcmf_if **ifp);
int (*query_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, int (*query_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd,
void *buf, uint len); void *buf, uint len);
int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf, int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
@ -46,9 +46,19 @@ int brcmf_proto_attach(struct brcmf_pub *drvr);
void brcmf_proto_detach(struct brcmf_pub *drvr); void brcmf_proto_detach(struct brcmf_pub *drvr);
static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
u8 *ifidx, struct sk_buff *skb) struct sk_buff *skb,
struct brcmf_if **ifp)
{ {
return drvr->proto->hdrpull(drvr, do_fws, ifidx, skb); struct brcmf_if *tmp = NULL;
/* ensure the protocol layer is always called with
* a non-NULL, initialized pointer.
*/
if (ifp)
*ifp = NULL;
else
ifp = &tmp;
return drvr->proto->hdrpull(drvr, do_fws, skb, ifp);
} }
static inline int brcmf_proto_query_dcmd(struct brcmf_pub *drvr, int ifidx, static inline int brcmf_proto_query_dcmd(struct brcmf_pub *drvr, int ifidx,
uint cmd, void *buf, uint len) uint cmd, void *buf, uint len)

View file

@ -15,6 +15,7 @@
*/ */
#include <linux/types.h> #include <linux/types.h>
#include <linux/atomic.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/printk.h> #include <linux/printk.h>
@ -123,6 +124,7 @@ struct rte_console {
#define BRCMF_FIRSTREAD (1 << 6) #define BRCMF_FIRSTREAD (1 << 6)
#define BRCMF_CONSOLE 10 /* watchdog interval to poll console */
/* SBSDIO_DEVICE_CTL */ /* SBSDIO_DEVICE_CTL */
@ -3204,6 +3206,8 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
if (IS_ERR_OR_NULL(dentry)) if (IS_ERR_OR_NULL(dentry))
return; return;
bus->console_interval = BRCMF_CONSOLE;
brcmf_debugfs_add_entry(drvr, "forensics", brcmf_sdio_forensic_read); brcmf_debugfs_add_entry(drvr, "forensics", brcmf_sdio_forensic_read);
brcmf_debugfs_add_entry(drvr, "counters", brcmf_debugfs_add_entry(drvr, "counters",
brcmf_debugfs_sdio_count_read); brcmf_debugfs_sdio_count_read);
@ -3613,7 +3617,7 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
} }
#ifdef DEBUG #ifdef DEBUG
/* Poll for console output periodically */ /* Poll for console output periodically */
if (bus->sdiodev->state == BRCMF_SDIOD_DATA && if (bus->sdiodev->state == BRCMF_SDIOD_DATA && BRCMF_FWCON_ON() &&
bus->console_interval != 0) { bus->console_interval != 0) {
bus->console.count += BRCMF_WD_POLL_MS; bus->console.count += BRCMF_WD_POLL_MS;
if (bus->console.count >= bus->console_interval) { if (bus->console.count >= bus->console_interval) {
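
Firmware console logging is now gated on the FWCON debug level: the PCIe path prints through pr_debug() and bails out of the console read when BRCMF_FWCON_ON() is false, and the SDIO watchdog only polls the console under the same condition, using the BRCMF_CONSOLE interval initialised during debugfs setup. Below is a rough user-space model of that gate and the poll accumulator; the bitmask value and the interval in milliseconds are invented.

#include <stdbool.h>
#include <stdio.h>

#define DBG_FWCON           0x8000u /* made-up bit; the real value lives in debug.h */
#define WD_POLL_MS          10      /* watchdog tick, as in BRCMF_WD_POLL_MS */
#define CONSOLE_INTERVAL_MS 100     /* illustrative; derived from BRCMF_CONSOLE in the driver */

static unsigned int debug_level = DBG_FWCON;

static bool fwcon_on(void)
{
    return debug_level & DBG_FWCON;
}

/* Modelled watchdog step: poll the firmware console only when the
 * FWCON debug level is set and the accumulated interval has elapsed. */
static void watchdog_tick(unsigned int *count)
{
    if (!fwcon_on())
        return;

    *count += WD_POLL_MS;
    if (*count >= CONSOLE_INTERVAL_MS) {
        *count = 0;
        printf("read firmware console buffer\n");
    }
}

int main(void)
{
    unsigned int count = 0;

    for (int i = 0; i < 25; i++)
        watchdog_tick(&count);      /* prints twice with these constants */

    debug_level = 0;                /* FWCON cleared: polling stops */
    for (int i = 0; i < 25; i++)
        watchdog_tick(&count);
    return 0;
}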

View file

@ -39,6 +39,7 @@
#define BRCM_CC_4339_CHIP_ID 0x4339 #define BRCM_CC_4339_CHIP_ID 0x4339
#define BRCM_CC_43430_CHIP_ID 43430 #define BRCM_CC_43430_CHIP_ID 43430
#define BRCM_CC_4345_CHIP_ID 0x4345 #define BRCM_CC_4345_CHIP_ID 0x4345
#define BRCM_CC_4350_CHIP_ID 0x4350
#define BRCM_CC_4354_CHIP_ID 0x4354 #define BRCM_CC_4354_CHIP_ID 0x4354
#define BRCM_CC_4356_CHIP_ID 0x4356 #define BRCM_CC_4356_CHIP_ID 0x4356
#define BRCM_CC_43566_CHIP_ID 43566 #define BRCM_CC_43566_CHIP_ID 43566
@ -47,6 +48,8 @@
#define BRCM_CC_43570_CHIP_ID 43570 #define BRCM_CC_43570_CHIP_ID 43570
#define BRCM_CC_4358_CHIP_ID 0x4358 #define BRCM_CC_4358_CHIP_ID 0x4358
#define BRCM_CC_43602_CHIP_ID 43602 #define BRCM_CC_43602_CHIP_ID 43602
#define BRCM_CC_4365_CHIP_ID 0x4365
#define BRCM_CC_4366_CHIP_ID 0x4366
/* USB Device IDs */ /* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e #define BRCM_USB_43143_DEVICE_ID 0xbd1e
@ -56,6 +59,7 @@
#define BRCM_USB_BCMFW_DEVICE_ID 0x0bdc #define BRCM_USB_BCMFW_DEVICE_ID 0x0bdc
/* PCIE Device IDs */ /* PCIE Device IDs */
#define BRCM_PCIE_4350_DEVICE_ID 0x43a3
#define BRCM_PCIE_4354_DEVICE_ID 0x43df #define BRCM_PCIE_4354_DEVICE_ID 0x43df
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec #define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3 #define BRCM_PCIE_43567_DEVICE_ID 0x43d3
@ -65,6 +69,13 @@
#define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb #define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb
#define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc #define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc
#define BRCM_PCIE_43602_RAW_DEVICE_ID 43602 #define BRCM_PCIE_43602_RAW_DEVICE_ID 43602
#define BRCM_PCIE_4365_DEVICE_ID 0x43ca
#define BRCM_PCIE_4365_2G_DEVICE_ID 0x43cb
#define BRCM_PCIE_4365_5G_DEVICE_ID 0x43cc
#define BRCM_PCIE_4366_DEVICE_ID 0x43c3
#define BRCM_PCIE_4366_2G_DEVICE_ID 0x43c4
#define BRCM_PCIE_4366_5G_DEVICE_ID 0x43c5
/* brcmsmac IDs */ /* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */ #define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */

View file

@ -142,6 +142,7 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
config IWLWIFI_DEVICE_TRACING config IWLWIFI_DEVICE_TRACING
bool "iwlwifi device access tracing" bool "iwlwifi device access tracing"
depends on EVENT_TRACING depends on EVENT_TRACING
default y
help help
Say Y here to trace all commands, including TX frames and IO Say Y here to trace all commands, including TX frames and IO
accesses, sent to the device. If you say yes, iwlwifi will accesses, sent to the device. If you say yes, iwlwifi will

View file

@ -72,12 +72,10 @@
#define IWL7260_UCODE_API_MAX 17 #define IWL7260_UCODE_API_MAX 17
/* Oldest version we won't warn about */ /* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 12 #define IWL7260_UCODE_API_OK 13
#define IWL3165_UCODE_API_OK 13
/* Lowest firmware API version supported */ /* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 12 #define IWL7260_UCODE_API_MIN 13
#define IWL3165_UCODE_API_MIN 13
/* NVM versions */ /* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d #define IWL7260_NVM_VERSION 0x0a1d
@ -113,7 +111,7 @@
static const struct iwl_base_params iwl7000_base_params = { static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000, .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
.num_of_queues = IWLAGN_NUM_QUEUES, .num_of_queues = 31,
.pll_cfg_val = 0, .pll_cfg_val = 0,
.shadow_ram_support = true, .shadow_ram_support = true,
.led_compensation = 57, .led_compensation = 57,
@ -269,11 +267,6 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 3165", .name = "Intel(R) Dual Band Wireless AC 3165",
.fw_name_pre = IWL7265D_FW_PRE, .fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000, IWL_DEVICE_7000,
/* sparse doens't like the re-assignment but it is safe */
#ifndef __CHECKER__
.ucode_api_ok = IWL3165_UCODE_API_OK,
.ucode_api_min = IWL3165_UCODE_API_MIN,
#endif
.ht_params = &iwl7000_ht_params, .ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3165_NVM_VERSION, .nvm_ver = IWL3165_NVM_VERSION,
.nvm_calib_ver = IWL3165_TX_POWER_VERSION, .nvm_calib_ver = IWL3165_TX_POWER_VERSION,

View file

@ -72,10 +72,10 @@
#define IWL8000_UCODE_API_MAX 17 #define IWL8000_UCODE_API_MAX 17
/* Oldest version we won't warn about */ /* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 12 #define IWL8000_UCODE_API_OK 13
/* Lowest firmware API version supported */ /* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 12 #define IWL8000_UCODE_API_MIN 13
/* NVM versions */ /* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d #define IWL8000_NVM_VERSION 0x0a1d
@ -107,7 +107,7 @@
static const struct iwl_base_params iwl8000_base_params = { static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000, .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = IWLAGN_NUM_QUEUES, .num_of_queues = 31,
.pll_cfg_val = 0, .pll_cfg_val = 0,
.shadow_ram_support = true, .shadow_ram_support = true,
.led_compensation = 57, .led_compensation = 57,

View file

@ -223,13 +223,13 @@ struct iwl_tt_tx_backoff {
* @support_tx_backoff: Support tx-backoff? * @support_tx_backoff: Support tx-backoff?
*/ */
struct iwl_tt_params { struct iwl_tt_params {
s32 ct_kill_entry; u32 ct_kill_entry;
s32 ct_kill_exit; u32 ct_kill_exit;
u32 ct_kill_duration; u32 ct_kill_duration;
s32 dynamic_smps_entry; u32 dynamic_smps_entry;
s32 dynamic_smps_exit; u32 dynamic_smps_exit;
s32 tx_protection_entry; u32 tx_protection_entry;
s32 tx_protection_exit; u32 tx_protection_exit;
struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE]; struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
bool support_ct_kill; bool support_ct_kill;
bool support_dynamic_smps; bool support_dynamic_smps;

View file

@ -450,7 +450,7 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
u32 api_flags = le32_to_cpu(ucode_api->api_flags); u32 api_flags = le32_to_cpu(ucode_api->api_flags);
int i; int i;
if (api_index >= IWL_API_MAX_BITS / 32) { if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) {
IWL_ERR(drv, "api_index larger than supported by driver\n"); IWL_ERR(drv, "api_index larger than supported by driver\n");
/* don't return an error so we can load FW that has more bits */ /* don't return an error so we can load FW that has more bits */
return 0; return 0;
@ -472,7 +472,7 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
u32 api_flags = le32_to_cpu(ucode_capa->api_capa); u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
int i; int i;
if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) { if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) {
IWL_ERR(drv, "api_index larger than supported by driver\n"); IWL_ERR(drv, "api_index larger than supported by driver\n");
/* don't return an error so we can load FW that has more bits */ /* don't return an error so we can load FW that has more bits */
return 0; return 0;

View file

@ -247,36 +247,31 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan. * longer than the passive one, which is essential for fragmented scan.
* @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
* IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
* @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
* @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
* @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
* through the dedicated host command.
* @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
* @IWL_UCODE_TLV_API_ASYNC_DTM: Async temperature notifications are supported.
* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
* @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
* @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
* @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
* instead of 3. * instead of 3.
* @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
* (command version 3) that supports per-chain limits * (command version 3) that supports per-chain limits
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/ */
enum iwl_ucode_tlv_api { enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3, IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3,
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8, IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
IWL_UCODE_TLV_API_HDC_PHASE_0 = (__force iwl_ucode_tlv_api_t)10,
IWL_UCODE_TLV_API_TX_POWER_DEV = (__force iwl_ucode_tlv_api_t)11,
IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14, IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14,
IWL_UCODE_TLV_API_SCD_CFG = (__force iwl_ucode_tlv_api_t)15,
IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = (__force iwl_ucode_tlv_api_t)16,
IWL_UCODE_TLV_API_ASYNC_DTM = (__force iwl_ucode_tlv_api_t)17,
IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
IWL_UCODE_TLV_API_STATS_V10 = (__force iwl_ucode_tlv_api_t)19,
IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24, IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24,
IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27, IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */
= 128
#endif
}; };
typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
@ -311,6 +306,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* is supported. * is supported.
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
* @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/ */
enum iwl_ucode_tlv_capa { enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0, IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0,
@ -333,6 +330,12 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */
= 128
#endif
}; };
/* The default calibrate table size if not specified by firmware file */ /* The default calibrate table size if not specified by firmware file */
@ -343,9 +346,6 @@ enum iwl_ucode_tlv_capa {
/* The default max probe length if not specified by the firmware file */ /* The default max probe length if not specified by the firmware file */
#define IWL_DEFAULT_MAX_PROBE_LENGTH 200 #define IWL_DEFAULT_MAX_PROBE_LENGTH 200
#define IWL_API_MAX_BITS 64
#define IWL_CAPABILITIES_MAX_BITS 64
/* /*
* For 16.0 uCode and above, there is no differentiation between sections, * For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address. * just an offset to the HW address.

View file

@ -105,8 +105,8 @@ struct iwl_ucode_capabilities {
u32 n_scan_channels; u32 n_scan_channels;
u32 standard_phy_calibration_size; u32 standard_phy_calibration_size;
u32 flags; u32 flags;
unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)]; unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)]; unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
}; };
static inline bool static inline bool

View file

@ -36,6 +36,29 @@
#include "iwl-prph.h" #include "iwl-prph.h"
#include "iwl-fh.h" #include "iwl-fh.h"
void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val);
iwl_trans_write8(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_write8);
void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val);
iwl_trans_write32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_write32);
u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
{
u32 val = iwl_trans_read32(trans, ofs);
trace_iwlwifi_dev_ioread32(trans->dev, ofs, val);
return val;
}
IWL_EXPORT_SYMBOL(iwl_read32);
#define IWL_POLL_INTERVAL 10 /* microseconds */ #define IWL_POLL_INTERVAL 10 /* microseconds */
int iwl_poll_bit(struct iwl_trans *trans, u32 addr, int iwl_poll_bit(struct iwl_trans *trans, u32 addr,

View file

@ -32,24 +32,9 @@
#include "iwl-devtrace.h" #include "iwl-devtrace.h"
#include "iwl-trans.h" #include "iwl-trans.h"
static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val) void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val);
{ void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val);
trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val); u32 iwl_read32(struct iwl_trans *trans, u32 ofs);
iwl_trans_write8(trans, ofs, val);
}
static inline void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val);
iwl_trans_write32(trans, ofs, val);
}
static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
{
u32 val = iwl_trans_read32(trans, ofs);
trace_iwlwifi_dev_ioread32(trans->dev, ofs, val);
return val;
}
static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask) static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{ {
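
iwl_write8(), iwl_write32() and iwl_read32() move from static inlines in iwl-io.h to exported definitions in iwl-io.c: with device tracing enabled, each inline copy pulls a tracepoint call into every caller, so one out-of-line copy keeps the image smaller. The single-file sketch below only shows the shape of that refactor, with the tracepoints reduced to printf stubs and a fake register array.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[64];

/*
 * What used to be static inlines in the header are modelled here as
 * ordinary functions: the tracepoint (reduced to a printf) and the
 * register access are emitted once and shared by every caller instead
 * of being duplicated at each call site.
 */
uint32_t iwl_read32_model(uint32_t ofs)
{
    uint32_t val = fake_regs[ofs / 4];

    printf("trace: read32  ofs=0x%02" PRIx32 " val=0x%08" PRIx32 "\n", ofs, val);
    return val;
}

void iwl_write32_model(uint32_t ofs, uint32_t val)
{
    printf("trace: write32 ofs=0x%02" PRIx32 " val=0x%08" PRIx32 "\n", ofs, val);
    fake_regs[ofs / 4] = val;
}

int main(void)
{
    iwl_write32_model(0x24, 0xffffffffu);   /* e.g. clearing a mailbox register */
    return iwl_read32_model(0x24) == 0xffffffffu ? 0 : 1;
}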

View file

@ -580,13 +580,15 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
IWL_ERR_DEV(dev, "mac address is not found\n"); IWL_ERR_DEV(dev, "mac address is not found\n");
} }
#define IWL_4165_DEVICE_ID 0x5501
struct iwl_nvm_data * struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *nvm_calib, const __le16 *regulatory,
const __le16 *mac_override, const __le16 *phy_sku, const __le16 *mac_override, const __le16 *phy_sku,
u8 tx_chains, u8 rx_chains, bool lar_fw_supported, u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
u32 mac_addr0, u32 mac_addr1) u32 mac_addr0, u32 mac_addr1, u32 hw_id)
{ {
struct iwl_nvm_data *data; struct iwl_nvm_data *data;
u32 sku; u32 sku;
@ -625,6 +627,17 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
(sku & NVM_SKU_CAP_11AC_ENABLE); (sku & NVM_SKU_CAP_11AC_ENABLE);
data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE; data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
/*
* OTP 0x52 bug work around
* define antenna 1x1 according to MIMO disabled
*/
if (hw_id == IWL_4165_DEVICE_ID && data->sku_cap_mimo_disabled) {
data->valid_tx_ant = ANT_B;
data->valid_rx_ant = ANT_B;
tx_chains = ANT_B;
rx_chains = ANT_B;
}
data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {

View file

@ -79,7 +79,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *nvm_calib, const __le16 *regulatory,
const __le16 *mac_override, const __le16 *phy_sku, const __le16 *mac_override, const __le16 *phy_sku,
u8 tx_chains, u8 rx_chains, bool lar_fw_supported, u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
u32 mac_addr0, u32 mac_addr1); u32 mac_addr0, u32 mac_addr1, u32 hw_id);
/** /**
* iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
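
iwl_parse_nvm_data() gains a hw_id parameter so the parser can apply the OTP 0x52 workaround: on the 4165 device (ID 0x5501) with the MIMO-disable SKU bit set, both chain masks are forced to antenna B, i.e. a 1x1 configuration. A minimal model of that decision follows; only the device ID and the condition come from the hunk, the ANT_A/ANT_B bit values here are assumed.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IWL_4165_DEVICE_ID 0x5501
#define ANT_A 0x1   /* illustrative bit values */
#define ANT_B 0x2

struct chains { uint8_t tx; uint8_t rx; };

static struct chains pick_chains(uint32_t hw_id, bool mimo_disabled)
{
    struct chains c = { .tx = ANT_A | ANT_B, .rx = ANT_A | ANT_B };

    /* OTP 0x52 workaround: report a 1x1 antenna configuration. */
    if (hw_id == IWL_4165_DEVICE_ID && mimo_disabled)
        c.tx = c.rx = ANT_B;
    return c;
}

int main(void)
{
    struct chains c = pick_chains(IWL_4165_DEVICE_ID, true);

    printf("tx 0x%x rx 0x%x\n", c.tx, c.rx);
    return 0;
}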

View file

@ -7,6 +7,7 @@
* *
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
@ -33,6 +34,7 @@
* *
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
@ -108,7 +110,8 @@ struct iwl_cfg;
* interact with it. The driver layer typically calls the start and stop * interact with it. The driver layer typically calls the start and stop
* handlers, the transport layer calls the others. * handlers, the transport layer calls the others.
* *
* All the handlers MUST be implemented * All the handlers MUST be implemented, except @rx_rss which can be left
* out *iff* the opmode will never run on hardware with multi-queue capability.
* *
* @start: start the op_mode. The transport layer is already allocated. * @start: start the op_mode. The transport layer is already allocated.
* May sleep * May sleep
@ -116,6 +119,10 @@ struct iwl_cfg;
* May sleep * May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
* HCMD this Rx responds to. Can't sleep. * HCMD this Rx responds to. Can't sleep.
* @rx_rss: data queue RX notification to the op_mode, for (data) notifications
* received on the RSS queue(s). The queue parameter indicates which of the
* RSS queues received this frame; it will always be non-zero.
* This method must not sleep.
* @queue_full: notifies that a HW queue is full. * @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled. * Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more. * @queue_not_full: notifies that a HW queue is not full any more.
@ -146,6 +153,8 @@ struct iwl_op_mode_ops {
void (*stop)(struct iwl_op_mode *op_mode); void (*stop)(struct iwl_op_mode *op_mode);
void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi, void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb); struct iwl_rx_cmd_buffer *rxb);
void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue); void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@ -186,6 +195,14 @@ static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
return op_mode->ops->rx(op_mode, napi, rxb); return op_mode->ops->rx(op_mode, napi, rxb);
} }
static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb,
unsigned int queue)
{
op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
}
static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
int queue) int queue)
{ {
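
The op_mode ops table gains an rx_rss() hook for frames delivered on non-default RSS queues, and the documented contract is relaxed so the hook may be left out only when the op mode never runs on multi-queue hardware. Below is a toy dispatcher in that spirit; the explicit NULL check is this sketch's own safety net, whereas the real iwl_op_mode_rx_rss() simply calls the hook.

#include <stddef.h>
#include <stdio.h>

struct op_mode_ops {
    void (*rx)(unsigned int frame);
    void (*rx_rss)(unsigned int frame, unsigned int queue); /* may be NULL on single-queue HW */
};

static void rx(unsigned int frame)
{
    printf("default queue: frame %u\n", frame);
}

static void rx_rss(unsigned int frame, unsigned int queue)
{
    printf("rss queue %u: frame %u\n", queue, frame);
}

static void dispatch(const struct op_mode_ops *ops, unsigned int frame,
                     unsigned int queue)
{
    if (queue == 0) {
        ops->rx(frame);
        return;
    }
    /* Multi-queue transports hand non-zero queues to rx_rss(). */
    if (ops->rx_rss)
        ops->rx_rss(frame, queue);
}

int main(void)
{
    const struct op_mode_ops ops = { .rx = rx, .rx_rss = rx_rss };

    dispatch(&ops, 1, 0);
    dispatch(&ops, 2, 3);
    return 0;
}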

View file

@ -87,6 +87,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
trans->cfg = cfg; trans->cfg = cfg;
trans->ops = ops; trans->ops = ops;
trans->dev_cmd_headroom = dev_cmd_headroom; trans->dev_cmd_headroom = dev_cmd_headroom;
trans->num_rx_queues = 1;
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev)); "iwl_cmd_pool:%s", dev_name(trans->dev));

View file

@ -386,6 +386,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
#define IWL_MAX_HW_QUEUES 32 #define IWL_MAX_HW_QUEUES 32
#define IWL_MAX_TID_COUNT 8 #define IWL_MAX_TID_COUNT 8
#define IWL_FRAME_LIMIT 64 #define IWL_FRAME_LIMIT 64
#define IWL_MAX_RX_HW_QUEUES 16
/** /**
* enum iwl_wowlan_status - WoWLAN image/device status * enum iwl_wowlan_status - WoWLAN image/device status
@ -654,6 +655,8 @@ enum iwl_d0i3_mode {
* @hw_id_str: a string with info about HW ID. Set during transport allocation. * @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @pm_support: set to true in start_hw if link pm is supported * @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled * @ltr_enabled: set to true if the LTR is enabled
* @num_rx_queues: number of RX queues allocated by the transport;
* the transport must set this before calling iwl_drv_start()
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd. * The user should use iwl_trans_{alloc,free}_tx_cmd.
* @dev_cmd_headroom: room needed for the transport's private use before the * @dev_cmd_headroom: room needed for the transport's private use before the
@ -693,6 +696,8 @@ struct iwl_trans {
bool pm_support; bool pm_support;
bool ltr_enabled; bool ltr_enabled;
u8 num_rx_queues;
/* The following fields are internal only */ /* The following fields are internal only */
struct kmem_cache *dev_cmd_pool; struct kmem_cache *dev_cmd_pool;
size_t dev_cmd_headroom; size_t dev_cmd_headroom;

View file

@ -102,6 +102,7 @@
#define IWL_MVM_QUOTA_THRESHOLD 4 #define IWL_MVM_QUOTA_THRESHOLD 4
#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
#define IWL_MVM_RS_DISABLE_P2P_MIMO 0 #define IWL_MVM_RS_DISABLE_P2P_MIMO 0
#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
#define IWL_MVM_TOF_IS_RESPONDER 0 #define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2

View file

@ -1170,6 +1170,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret; int ret;
/* make sure the d0i3 exit work is not pending */
flush_work(&mvm->d0i3_exit_work);
ret = iwl_trans_suspend(mvm->trans); ret = iwl_trans_suspend(mvm->trans);
if (ret) if (ret)
return ret; return ret;

View file

@ -511,7 +511,8 @@ static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
{ {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm; struct iwl_mvm *mvm = mvmvif->mvm;
int value, ret = -EINVAL; u32 value;
int ret = -EINVAL;
char *data; char *data;
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
@ -599,7 +600,8 @@ static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
{ {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm; struct iwl_mvm *mvm = mvmvif->mvm;
int value, ret = 0; u32 value;
int ret = 0;
char *data; char *data;
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
@ -822,7 +824,8 @@ static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
{ {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm; struct iwl_mvm *mvm = mvmvif->mvm;
int value, ret = 0; u32 value;
int ret = 0;
char *data; char *data;
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
@ -892,6 +895,7 @@ static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
goto out; goto out;
} }
memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN); memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
goto out;
} }
data = iwl_dbgfs_is_match("macaddr_mask=", buf); data = iwl_dbgfs_is_match("macaddr_mask=", buf);
@ -903,21 +907,22 @@ static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
goto out; goto out;
} }
memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN); memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
goto out;
} }
data = iwl_dbgfs_is_match("ap=", buf); data = iwl_dbgfs_is_match("ap=", buf);
if (data) { if (data) {
struct iwl_tof_range_req_ap_entry ap; struct iwl_tof_range_req_ap_entry ap = {};
int size = sizeof(struct iwl_tof_range_req_ap_entry); int size = sizeof(struct iwl_tof_range_req_ap_entry);
u16 burst_period; u16 burst_period;
u8 *mac = ap.bssid; u8 *mac = ap.bssid;
unsigned int i; unsigned int i;
if (sscanf(data, "%u %hhd %hhx %hhx" if (sscanf(data, "%u %hhd %hhd %hhd"
"%hhx:%hhx:%hhx:%hhx:%hhx:%hhx" "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
"%hhx %hhx %hx" "%hhd %hhd %hd"
"%hhx %hhx %x" "%hhd %hhd %d"
"%hhx %hhx %hhx %hhx", "%hhx %hhd %hhd %hhd",
&i, &ap.channel_num, &ap.bandwidth, &i, &ap.channel_num, &ap.bandwidth,
&ap.ctrl_ch_position, &ap.ctrl_ch_position,
mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5, mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
@ -944,12 +949,12 @@ static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
data = iwl_dbgfs_is_match("send_range_request=", buf); data = iwl_dbgfs_is_match("send_range_request=", buf);
if (data) { if (data) {
ret = kstrtou32(data, 10, &value); ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) { if (ret == 0 && value)
ret = iwl_mvm_tof_range_request_cmd(mvm, vif); ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
goto out; goto out;
}
} }
ret = -EINVAL;
out: out:
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
return ret ?: count; return ret ?: count;
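
Editor's note: the "ret ?: count" idiom used by these debugfs write handlers is the GCC conditional with an omitted middle operand: a non-zero ret (a negative errno) is propagated, otherwise the whole write is reported as consumed. A spelled-out sketch of the same logic:

/* Sketch: what "return ret ?: count;" means for a debugfs write op. */
static ssize_t sketch_debugfs_write_result(int ret, size_t count)
{
	if (ret)		/* non-zero ret is a negative errno: propagate it */
		return ret;
	return count;		/* success: report the whole write as consumed */
}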
@ -994,16 +999,18 @@ static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i]; struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
pos += scnprintf(buf + pos, bufsz - pos, pos += scnprintf(buf + pos, bufsz - pos,
"ap %.2d: channel_num=%hhx bw=%hhx" "ap %.2d: channel_num=%hhd bw=%hhd"
" control=%hhx bssid=%pM type=%hhx" " control=%hhd bssid=%pM type=%hhd"
" num_of_bursts=%hhx burst_period=%hx ftm=%hhx" " num_of_bursts=%hhd burst_period=%hd ftm=%hhd"
" retries=%hhx tsf_delta=%x location_req=%hhx " " retries=%hhd tsf_delta=%d"
" asap=%hhx enable=%hhx rssi=%hhx\n", " tsf_delta_direction=%hhd location_req=0x%hhx "
" asap=%hhd enable=%hhd rssi=%hhd\n",
i, ap->channel_num, ap->bandwidth, i, ap->channel_num, ap->bandwidth,
ap->ctrl_ch_position, ap->bssid, ap->ctrl_ch_position, ap->bssid,
ap->measure_type, ap->num_of_bursts, ap->measure_type, ap->num_of_bursts,
ap->burst_period, ap->samples_per_burst, ap->burst_period, ap->samples_per_burst,
ap->retries_per_sample, ap->tsf_delta, ap->retries_per_sample, ap->tsf_delta,
ap->tsf_delta_direction,
ap->location_req, ap->asap_mode, ap->location_req, ap->asap_mode,
ap->enable_dyn_ack, ap->rssi); ap->enable_dyn_ack, ap->rssi);
} }
@ -1019,7 +1026,8 @@ static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
{ {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm; struct iwl_mvm *mvm = mvmvif->mvm;
int value, ret = 0; u32 value;
int ret = 0;
char *data; char *data;
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
@ -1071,12 +1079,12 @@ static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
data = iwl_dbgfs_is_match("send_range_req_ext=", buf); data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
if (data) { if (data) {
ret = kstrtou32(data, 10, &value); ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) { if (ret == 0 && value)
ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif); ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
goto out; goto out;
}
} }
ret = -EINVAL;
out: out:
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
return ret ?: count; return ret ?: count;
@ -1099,18 +1107,18 @@ static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
pos += scnprintf(buf + pos, bufsz - pos, pos += scnprintf(buf + pos, bufsz - pos,
"tsf_timer_offset_msec = %hx\n", "tsf_timer_offset_msec = %hd\n",
cmd->tsf_timer_offset_msec); cmd->tsf_timer_offset_msec);
pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n", pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n",
cmd->min_delta_ftm); cmd->min_delta_ftm);
pos += scnprintf(buf + pos, bufsz - pos, pos += scnprintf(buf + pos, bufsz - pos,
"ftm_format_and_bw20M = %hhx\n", "ftm_format_and_bw20M = %hhd\n",
cmd->ftm_format_and_bw20M); cmd->ftm_format_and_bw20M);
pos += scnprintf(buf + pos, bufsz - pos, pos += scnprintf(buf + pos, bufsz - pos,
"ftm_format_and_bw40M = %hhx\n", "ftm_format_and_bw40M = %hhd\n",
cmd->ftm_format_and_bw40M); cmd->ftm_format_and_bw40M);
pos += scnprintf(buf + pos, bufsz - pos, pos += scnprintf(buf + pos, bufsz - pos,
"ftm_format_and_bw80M = %hhx\n", "ftm_format_and_bw80M = %hhd\n",
cmd->ftm_format_and_bw80M); cmd->ftm_format_and_bw80M);
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
@ -1123,8 +1131,8 @@ static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
{ {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm; struct iwl_mvm *mvm = mvmvif->mvm;
int value, ret = 0; u32 value;
int abort_id; int abort_id, ret = 0;
char *data; char *data;
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
@ -1205,11 +1213,11 @@ static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i]; struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
pos += scnprintf(buf + pos, bufsz - pos, pos += scnprintf(buf + pos, bufsz - pos,
"ap %.2d: bssid=%pM status=%hhx bw=%hhx" "ap %.2d: bssid=%pM status=%hhd bw=%hhd"
" rtt=%x rtt_var=%x rtt_spread=%x" " rtt=%d rtt_var=%d rtt_spread=%d"
" rssi=%hhx rssi_spread=%hhx" " rssi=%hhd rssi_spread=%hhd"
" range=%x range_var=%x" " range=%d range_var=%d"
" time_stamp=%x\n", " time_stamp=%d\n",
i, ap->bssid, ap->measure_status, i, ap->bssid, ap->measure_status,
ap->measure_bw, ap->measure_bw,
ap->rtt, ap->rtt_variance, ap->rtt_spread, ap->rtt, ap->rtt_variance, ap->rtt_spread,
@ -1250,11 +1258,10 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
{ {
struct ieee80211_vif *vif = file->private_data; struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
char buf[3]; char buf[2];
buf[0] = mvmvif->low_latency ? '1' : '0'; buf[0] = mvmvif->low_latency ? '1' : '0';
buf[1] = '\n'; buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
} }

View file

@ -1214,118 +1214,6 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
return ret; return ret;
} }
#define MAX_NUM_ND_MATCHSETS 10
static ssize_t iwl_dbgfs_netdetect_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
const char *seps = ",\n";
char *buf_ptr = buf;
char *value_str = NULL;
int ret, i;
/* TODO: don't free if write is being called several times in one go */
if (mvm->nd_config) {
kfree(mvm->nd_config->match_sets);
kfree(mvm->nd_config);
mvm->nd_config = NULL;
}
mvm->nd_config = kzalloc(sizeof(*mvm->nd_config) +
(11 * sizeof(struct ieee80211_channel *)),
GFP_KERNEL);
if (!mvm->nd_config) {
ret = -ENOMEM;
goto out_free;
}
mvm->nd_config->n_channels = 11;
mvm->nd_config->scan_width = NL80211_BSS_CHAN_WIDTH_20;
mvm->nd_config->interval = 5;
mvm->nd_config->min_rssi_thold = -80;
for (i = 0; i < mvm->nd_config->n_channels; i++)
mvm->nd_config->channels[i] = &mvm->nvm_data->channels[i];
mvm->nd_config->match_sets =
kcalloc(MAX_NUM_ND_MATCHSETS,
sizeof(*mvm->nd_config->match_sets),
GFP_KERNEL);
if (!mvm->nd_config->match_sets) {
ret = -ENOMEM;
goto out_free;
}
while ((value_str = strsep(&buf_ptr, seps)) &&
strlen(value_str)) {
struct cfg80211_match_set *set;
if (mvm->nd_config->n_match_sets >= MAX_NUM_ND_MATCHSETS) {
ret = -EINVAL;
goto out_free;
}
set = &mvm->nd_config->match_sets[mvm->nd_config->n_match_sets];
set->ssid.ssid_len = strlen(value_str);
if (set->ssid.ssid_len > IEEE80211_MAX_SSID_LEN) {
ret = -EINVAL;
goto out_free;
}
memcpy(set->ssid.ssid, value_str, set->ssid.ssid_len);
mvm->nd_config->n_match_sets++;
}
ret = count;
if (mvm->nd_config->n_match_sets)
goto out;
out_free:
if (mvm->nd_config)
kfree(mvm->nd_config->match_sets);
kfree(mvm->nd_config);
mvm->nd_config = NULL;
out:
return ret;
}
static ssize_t
iwl_dbgfs_netdetect_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
size_t bufsz, ret;
char *buf;
int i, n_match_sets, pos = 0;
n_match_sets = mvm->nd_config ? mvm->nd_config->n_match_sets : 0;
bufsz = n_match_sets * (IEEE80211_MAX_SSID_LEN + 1) + 1;
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
for (i = 0; i < n_match_sets; i++) {
if (pos +
mvm->nd_config->match_sets[i].ssid.ssid_len + 2 > bufsz) {
ret = -EIO;
goto out;
}
memcpy(buf + pos, mvm->nd_config->match_sets[i].ssid.ssid,
mvm->nd_config->match_sets[i].ssid.ssid_len);
pos += mvm->nd_config->match_sets[i].ssid.ssid_len;
buf[pos++] = '\n';
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
out:
kfree(buf);
return ret;
}
#endif #endif
#define PRINT_MVM_REF(ref) do { \ #define PRINT_MVM_REF(ref) do { \
@ -1473,11 +1361,25 @@ iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf,
return count; return count;
} }
static ssize_t
iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
int ret;
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
mutex_unlock(&mvm->mutex);
return ret ?: count;
}
MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
/* Device wide debugfs entries */ /* Device wide debugfs entries */
MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
MVM_DEBUGFS_READ_FILE_OPS(nic_temp); MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
@ -1503,7 +1405,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(netdetect, 384);
#endif #endif
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
@ -1538,6 +1439,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR); MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR); MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
if (!debugfs_create_bool("enable_scan_iteration_notif", if (!debugfs_create_bool("enable_scan_iteration_notif",
S_IRUSR | S_IWUSR, S_IRUSR | S_IWUSR,
mvm->debugfs_dir, mvm->debugfs_dir,
@ -1572,7 +1474,6 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
if (!debugfs_create_u32("last_netdetect_scans", S_IRUSR, if (!debugfs_create_u32("last_netdetect_scans", S_IRUSR,
mvm->debugfs_dir, &mvm->last_netdetect_scans)) mvm->debugfs_dir, &mvm->last_netdetect_scans))
goto err; goto err;
MVM_DEBUGFS_ADD_FILE(netdetect, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
#endif #endif
if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR, if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR,
@ -1594,6 +1495,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
if (!debugfs_create_blob("nvm_prod", S_IRUSR, if (!debugfs_create_blob("nvm_prod", S_IRUSR,
mvm->debugfs_dir, &mvm->nvm_prod_blob)) mvm->debugfs_dir, &mvm->nvm_prod_blob))
goto err; goto err;
if (!debugfs_create_blob("nvm_phy_sku", S_IRUSR,
mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
goto err;
/* /*
* Create a symlink with mac80211. It will be removed when mac80211 * Create a symlink with mac80211. It will be removed when mac80211

View file

@ -192,16 +192,10 @@ struct iwl_powertable_cmd {
/** /**
* enum iwl_device_power_flags - masks for device power command flags * enum iwl_device_power_flags - masks for device power command flags
* @DEVIC_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off * @DEVIC_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
* receiver and transmitter. '0' - does not allow. This flag should be * receiver and transmitter. '0' - does not allow.
* always set to '1' unless one need to disable actual power down for debug
* purposes.
* @DEVICE_POWER_FLAGS_CAM_MSK: '1' CAM (Continuous Active Mode) is set, meaning
* that power management is disabled. '0' Power management is enabled, one
* of power schemes is applied.
*/ */
enum iwl_device_power_flags { enum iwl_device_power_flags {
DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
DEVICE_POWER_FLAGS_CAM_MSK = BIT(13),
}; };
/** /**

View file

@ -0,0 +1,238 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __fw_api_rx_h__
#define __fw_api_rx_h__
#define IWL_RX_INFO_PHY_CNT 8
#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
/**
* struct iwl_rx_phy_info - phy info
* (REPLY_RX_PHY_CMD = 0xc0)
* @non_cfg_phy_cnt: non configurable DSP phy data byte count
* @cfg_phy_cnt: configurable DSP phy data byte count
* @stat_id: configurable DSP phy data set ID
* @reserved1:
* @system_timestamp: GP2 at on air rise
* @timestamp: TSF at on air rise
* @beacon_time_stamp: beacon at on-air rise
* @phy_flags: general phy flags: band, modulation, ...
* @channel: channel number
* @non_cfg_phy_buf: for various implementations of non_cfg_phy
* @rate_n_flags: RATE_MCS_*
* @byte_count: frame's byte-count
* @frame_time: frame's time on the air, based on byte count and frame rate
* calculation
* @mac_active_msk: what MACs were active when the frame was received
*
* Before each Rx, the device sends this data. It contains PHY information
* about the reception of the packet.
*/
struct iwl_rx_phy_info {
u8 non_cfg_phy_cnt;
u8 cfg_phy_cnt;
u8 stat_id;
u8 reserved1;
__le32 system_timestamp;
__le64 timestamp;
__le32 beacon_time_stamp;
__le16 phy_flags;
__le16 channel;
__le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
__le32 rate_n_flags;
__le32 byte_count;
__le16 mac_active_msk;
__le16 frame_time;
} __packed;
/*
* TCP offload Rx assist info
*
* bits 0:3 - reserved
* bits 4:7 - MIC CRC length
* bits 8:12 - MAC header length
* bit 13 - Padding indication
* bit 14 - A-AMSDU indication
* bit 15 - Offload enabled
*/
enum iwl_csum_rx_assist_info {
CSUM_RXA_RESERVED_MASK = 0x000f,
CSUM_RXA_MICSIZE_MASK = 0x00f0,
CSUM_RXA_HEADERLEN_MASK = 0x1f00,
CSUM_RXA_PADD = BIT(13),
CSUM_RXA_AMSDU = BIT(14),
CSUM_RXA_ENA = BIT(15)
};
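
Editor's note: as a reading aid for the bit layout documented above, here is a short sketch of decoding the checksum-offload assist word. The masks and shifts come straight from this header; the helper itself is illustrative, not part of the driver.

/* Illustrative decode of the checksum-offload assist word. */
static void sketch_decode_csum_assist(__le16 assist_le)
{
	u16 assist = le16_to_cpu(assist_le);
	u16 mic_len = (assist & CSUM_RXA_MICSIZE_MASK) >> 4;	/* bits 4:7  */
	u16 hdr_len = (assist & CSUM_RXA_HEADERLEN_MASK) >> 8;	/* bits 8:12 */
	bool amsdu = assist & CSUM_RXA_AMSDU;			/* bit 14 */
	bool offloaded = assist & CSUM_RXA_ENA;			/* bit 15 */

	pr_debug("csum assist: hdr_len=%u mic_len=%u amsdu=%d ena=%d\n",
		 hdr_len, mic_len, amsdu, offloaded);
}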
/**
* struct iwl_rx_mpdu_res_start - phy info
* @assist: see CSUM_RX_ASSIST_ above
*/
struct iwl_rx_mpdu_res_start {
__le16 byte_count;
__le16 assist;
} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
/**
* enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
* @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
* @RX_RES_PHY_FLAGS_MOD_CCK:
* @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
* @RX_RES_PHY_FLAGS_NARROW_BAND:
* @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
* @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
* @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
* @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
* @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
*/
enum iwl_rx_phy_flags {
RX_RES_PHY_FLAGS_BAND_24 = BIT(0),
RX_RES_PHY_FLAGS_MOD_CCK = BIT(1),
RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2),
RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3),
RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4),
RX_RES_PHY_FLAGS_ANTENNA_POS = 4,
RX_RES_PHY_FLAGS_AGG = BIT(7),
RX_RES_PHY_FLAGS_OFDM_HT = BIT(8),
RX_RES_PHY_FLAGS_OFDM_GF = BIT(9),
RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10),
};
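
Editor's note: a quick illustrative use of these flags, recovering the RX antenna and band from a received iwl_rx_phy_info. This is a sketch only; the driver's real handling lives in the rx code and is not shown in this diff.

/* Illustrative parsing of phy_flags from struct iwl_rx_phy_info. */
static void sketch_parse_phy_flags(struct iwl_rx_phy_info *phy_info)
{
	u16 phy_flags = le16_to_cpu(phy_info->phy_flags);
	u8 antenna = (phy_flags & RX_RES_PHY_FLAGS_ANTENNA) >>
		     RX_RES_PHY_FLAGS_ANTENNA_POS;
	bool band_2ghz = phy_flags & RX_RES_PHY_FLAGS_BAND_24;
	bool agg = phy_flags & RX_RES_PHY_FLAGS_AGG;

	pr_debug("rx: ant=%u band=%s%s\n", antenna,
		 band_2ghz ? "2.4GHz" : "5GHz",
		 agg ? " (part of A-MPDU)" : "");
}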
/**
* enum iwl_mvm_rx_status - written by fw for each Rx packet
* @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
* @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
* @RX_MPDU_RES_STATUS_SRC_STA_FOUND:
* @RX_MPDU_RES_STATUS_KEY_VALID:
* @RX_MPDU_RES_STATUS_KEY_PARAM_OK:
* @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
* @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
* in the driver.
* @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
* @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or
* alg = CCM only. Checks replay attack for 11w frames. Relevant only if
* %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
* @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
* @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
* @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
* @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
* @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
* @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
* @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
* @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
* @RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
* @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
* @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
* @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
* @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
* @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
* @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
* @RX_MPDU_RES_STATUS_STA_ID_MSK:
* @RX_MPDU_RES_STATUS_RRF_KILL:
* @RX_MPDU_RES_STATUS_FILTERING_MSK:
* @RX_MPDU_RES_STATUS2_FILTERING_MSK:
*/
enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1),
RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2),
RX_MPDU_RES_STATUS_KEY_VALID = BIT(3),
RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4),
RX_MPDU_RES_STATUS_ICV_OK = BIT(5),
RX_MPDU_RES_STATUS_MIC_OK = BIT(6),
RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7),
RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8),
RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP = BIT(12),
RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16),
RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000),
RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000),
RX_MPDU_RES_STATUS_RRF_KILL = BIT(29),
RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000),
RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000),
};
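
Editor's note: the SEC_* encryption values above form a 3-bit field (selected by RX_MPDU_RES_STATUS_SEC_ENC_MSK) rather than independent flags. A minimal sketch of checking whether a frame was successfully decrypted with CCMP, assuming rx_pkt_status is the little-endian status word from the MPDU response:

/* Illustrative check of the decryption status bits. */
static bool sketch_frame_decrypted_ccm(__le32 rx_pkt_status)
{
	u32 status = le32_to_cpu(rx_pkt_status);

	/* SEC_ENC_MSK selects the 3-bit algorithm field (bits 8..10) */
	if ((status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
	    RX_MPDU_RES_STATUS_SEC_CCM_ENC)
		return false;

	/* decryption must have completed and the CCM MIC must be OK */
	return (status & RX_MPDU_RES_STATUS_DEC_DONE) &&
	       (status & RX_MPDU_RES_STATUS_MIC_OK);
}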
#endif /* __fw_api_rx_h__ */

View file

@ -219,32 +219,6 @@ struct mvm_statistics_bt_activity {
__le32 lo_priority_rx_denied_cnt; __le32 lo_priority_rx_denied_cnt;
} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */ } __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
struct mvm_statistics_general_v5 {
__le32 radio_temperature;
__le32 radio_voltage;
struct mvm_statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
__le32 slots_idle;
__le32 ttl_timestamp;
struct mvm_statistics_div slow_div;
__le32 rx_enable_counter;
/*
* num_of_sos_states:
* count the number of times we have to re-tune
* in order to get out of bad PHY status
*/
__le32 num_of_sos_states;
__le32 beacon_filtered;
__le32 missed_beacons;
__s8 beacon_filter_average_energy;
__s8 beacon_filter_reason;
__s8 beacon_filter_current_energy;
__s8 beacon_filter_reserved;
__le32 beacon_filter_delta_time;
struct mvm_statistics_bt_activity bt_activity;
} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
struct mvm_statistics_general_v8 { struct mvm_statistics_general_v8 {
__le32 radio_temperature; __le32 radio_temperature;
__le32 radio_voltage; __le32 radio_voltage;
@ -263,10 +237,10 @@ struct mvm_statistics_general_v8 {
__le32 num_of_sos_states; __le32 num_of_sos_states;
__le32 beacon_filtered; __le32 beacon_filtered;
__le32 missed_beacons; __le32 missed_beacons;
__s8 beacon_filter_average_energy; u8 beacon_filter_average_energy;
__s8 beacon_filter_reason; u8 beacon_filter_reason;
__s8 beacon_filter_current_energy; u8 beacon_filter_current_energy;
__s8 beacon_filter_reserved; u8 beacon_filter_reserved;
__le32 beacon_filter_delta_time; __le32 beacon_filter_delta_time;
struct mvm_statistics_bt_activity bt_activity; struct mvm_statistics_bt_activity bt_activity;
__le64 rx_time; __le64 rx_time;
@ -293,13 +267,6 @@ struct mvm_statistics_rx {
* STATISTICS_CMD (0x9c), below. * STATISTICS_CMD (0x9c), below.
*/ */
struct iwl_notif_statistics_v8 {
__le32 flag;
struct mvm_statistics_rx rx;
struct mvm_statistics_tx tx;
struct mvm_statistics_general_v5 general;
} __packed; /* STATISTICS_NTFY_API_S_VER_8 */
struct iwl_notif_statistics_v10 { struct iwl_notif_statistics_v10 {
__le32 flag; __le32 flag;
struct mvm_statistics_rx rx; struct mvm_statistics_rx rx;

View file

@ -67,6 +67,7 @@
#define __fw_api_h__ #define __fw_api_h__
#include "fw-api-rs.h" #include "fw-api-rs.h"
#include "fw-api-rx.h"
#include "fw-api-tx.h" #include "fw-api-tx.h"
#include "fw-api-sta.h" #include "fw-api-sta.h"
#include "fw-api-mac.h" #include "fw-api-mac.h"
@ -100,6 +101,7 @@ enum iwl_mvm_tx_fifo {
enum { enum {
MVM_ALIVE = 0x1, MVM_ALIVE = 0x1,
REPLY_ERROR = 0x2, REPLY_ERROR = 0x2,
ECHO_CMD = 0x3,
INIT_COMPLETE_NOTIF = 0x4, INIT_COMPLETE_NOTIF = 0x4,
@ -266,6 +268,16 @@ enum {
REPLY_MAX = 0xff, REPLY_MAX = 0xff,
}; };
enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
/* command groups */
enum {
PHY_OPS_GROUP = 0x4,
};
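
Editor's note: these are the first wide command identifiers in the mvm command space; a notification is addressed by (group, opcode) instead of a bare 8-bit opcode. A sketch of how such an ID can be composed, following the group-in-the-high-byte convention (treat the exact macro name as an assumption for illustration):

/* Sketch: composing a wide command ID from group + opcode. */
#define SKETCH_WIDE_ID(grp, opcode)	(((grp) << 8) | (opcode))

/* e.g. the wide DTS measurement notification would be addressed as
 * SKETCH_WIDE_ID(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE) == 0x4FF */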
/** /**
* struct iwl_cmd_response - generic response struct for most commands * struct iwl_cmd_response - generic response struct for most commands
* @status: status of the command asked, changes for each one * @status: status of the command asked, changes for each one
@ -1070,190 +1082,6 @@ struct iwl_hs20_roc_res {
__le32 status; __le32 status;
} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */ } __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
#define IWL_RX_INFO_PHY_CNT 8
#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
#define IWL_RX_INFO_AGC_IDX 1
#define IWL_RX_INFO_RSSI_AB_IDX 2
#define IWL_OFDM_AGC_A_MSK 0x0000007f
#define IWL_OFDM_AGC_A_POS 0
#define IWL_OFDM_AGC_B_MSK 0x00003f80
#define IWL_OFDM_AGC_B_POS 7
#define IWL_OFDM_AGC_CODE_MSK 0x3fe00000
#define IWL_OFDM_AGC_CODE_POS 20
#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
#define IWL_OFDM_RSSI_A_POS 0
#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
#define IWL_OFDM_RSSI_ALLBAND_A_POS 8
#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
#define IWL_OFDM_RSSI_B_POS 16
#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
#define IWL_OFDM_RSSI_ALLBAND_B_POS 24
/**
* struct iwl_rx_phy_info - phy info
* (REPLY_RX_PHY_CMD = 0xc0)
* @non_cfg_phy_cnt: non configurable DSP phy data byte count
* @cfg_phy_cnt: configurable DSP phy data byte count
* @stat_id: configurable DSP phy data set ID
* @reserved1:
* @system_timestamp: GP2 at on air rise
* @timestamp: TSF at on air rise
* @beacon_time_stamp: beacon at on-air rise
* @phy_flags: general phy flags: band, modulation, ...
* @channel: channel number
* @non_cfg_phy_buf: for various implementations of non_cfg_phy
* @rate_n_flags: RATE_MCS_*
* @byte_count: frame's byte-count
* @frame_time: frame's time on the air, based on byte count and frame rate
* calculation
* @mac_active_msk: what MACs were active when the frame was received
*
* Before each Rx, the device sends this data. It contains PHY information
* about the reception of the packet.
*/
struct iwl_rx_phy_info {
u8 non_cfg_phy_cnt;
u8 cfg_phy_cnt;
u8 stat_id;
u8 reserved1;
__le32 system_timestamp;
__le64 timestamp;
__le32 beacon_time_stamp;
__le16 phy_flags;
__le16 channel;
__le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
__le32 rate_n_flags;
__le32 byte_count;
__le16 mac_active_msk;
__le16 frame_time;
} __packed;
/*
* TCP offload Rx assist info
*
* bits 0:3 - reserved
* bits 4:7 - MIC CRC length
* bits 8:12 - MAC header length
* bit 13 - Padding indication
* bit 14 - A-AMSDU indication
* bit 15 - Offload enabled
*/
enum iwl_csum_rx_assist_info {
CSUM_RXA_RESERVED_MASK = 0x000f,
CSUM_RXA_MICSIZE_MASK = 0x00f0,
CSUM_RXA_HEADERLEN_MASK = 0x1f00,
CSUM_RXA_PADD = BIT(13),
CSUM_RXA_AMSDU = BIT(14),
CSUM_RXA_ENA = BIT(15)
};
/**
* struct iwl_rx_mpdu_res_start - phy info
* @assist: see CSUM_RX_ASSIST_ above
*/
struct iwl_rx_mpdu_res_start {
__le16 byte_count;
__le16 assist;
} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
/**
* enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
* @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
* @RX_RES_PHY_FLAGS_MOD_CCK:
* @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
* @RX_RES_PHY_FLAGS_NARROW_BAND:
* @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
* @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
* @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
* @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
* @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
*/
enum iwl_rx_phy_flags {
RX_RES_PHY_FLAGS_BAND_24 = BIT(0),
RX_RES_PHY_FLAGS_MOD_CCK = BIT(1),
RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2),
RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3),
RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4),
RX_RES_PHY_FLAGS_ANTENNA_POS = 4,
RX_RES_PHY_FLAGS_AGG = BIT(7),
RX_RES_PHY_FLAGS_OFDM_HT = BIT(8),
RX_RES_PHY_FLAGS_OFDM_GF = BIT(9),
RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10),
};
/**
* enum iwl_mvm_rx_status - written by fw for each Rx packet
* @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
* @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
* @RX_MPDU_RES_STATUS_SRC_STA_FOUND:
* @RX_MPDU_RES_STATUS_KEY_VALID:
* @RX_MPDU_RES_STATUS_KEY_PARAM_OK:
* @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
* @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
* in the driver.
* @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
* @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or
* alg = CCM only. Checks replay attack for 11w frames. Relevant only if
* %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
* @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
* @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
* @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
* @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
* @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
* @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
* @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
* @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
* @RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
* @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
* @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
* @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
* @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
* @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
* @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
* @RX_MPDU_RES_STATUS_STA_ID_MSK:
* @RX_MPDU_RES_STATUS_RRF_KILL:
* @RX_MPDU_RES_STATUS_FILTERING_MSK:
* @RX_MPDU_RES_STATUS2_FILTERING_MSK:
*/
enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1),
RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2),
RX_MPDU_RES_STATUS_KEY_VALID = BIT(3),
RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4),
RX_MPDU_RES_STATUS_ICV_OK = BIT(5),
RX_MPDU_RES_STATUS_MIC_OK = BIT(6),
RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7),
RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8),
RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8),
RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8),
RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8),
RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8),
RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP = BIT(12),
RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16),
RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000),
RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000),
RX_MPDU_RES_STATUS_RRF_KILL = BIT(29),
RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000),
RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000),
};
/** /**
* struct iwl_radio_version_notif - information on the radio version * struct iwl_radio_version_notif - information on the radio version
* ( RADIO_VERSION_NOTIFICATION = 0x68 ) * ( RADIO_VERSION_NOTIFICATION = 0x68 )

View file

@ -616,12 +616,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
* will be empty. * will be empty.
*/ */
for (i = 0; i < IWL_MAX_HW_QUEUES; i++) { memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE) mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
mvm->queue_to_mac80211[i] = i;
else
mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
}
for (i = 0; i < IEEE80211_MAX_QUEUES; i++) for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0); atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@ -940,19 +936,6 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
return ret; return ret;
} }
static int iwl_mvm_config_ltr_v1(struct iwl_mvm *mvm)
{
struct iwl_ltr_config_cmd_v1 cmd_v1 = {
.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
};
if (!mvm->trans->ltr_enabled)
return 0;
return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
sizeof(cmd_v1), &cmd_v1);
}
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{ {
struct iwl_ltr_config_cmd cmd = { struct iwl_ltr_config_cmd cmd = {
@ -962,9 +945,6 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
if (!mvm->trans->ltr_enabled) if (!mvm->trans->ltr_enabled)
return 0; return 0;
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_HDC_PHASE_0))
return iwl_mvm_config_ltr_v1(mvm);
return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
sizeof(cmd), &cmd); sizeof(cmd), &cmd);
} }

View file

@ -7,6 +7,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
@ -33,6 +34,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
@ -484,16 +486,18 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) { switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_P2P_DEVICE:
iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_TX_FIFO_VO, wdg_timeout); IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
break; break;
case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP:
iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MVM_TX_FIFO_MCAST, wdg_timeout); IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
/* fall through */ /* fall through */
default: default:
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac], iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
iwl_mvm_ac_to_tx_fifo[ac], vif->hw_queue[ac],
iwl_mvm_ac_to_tx_fifo[ac], 0,
wdg_timeout); wdg_timeout);
break; break;
} }
@ -509,14 +513,19 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) { switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_P2P_DEVICE:
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, 0); iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT,
0);
break; break;
case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP:
iwl_mvm_disable_txq(mvm, vif->cab_queue, 0); iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
/* fall through */ /* fall through */
default: default:
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], 0); iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
vif->hw_queue[ac],
IWL_MAX_TID_COUNT, 0);
} }
} }
@ -1128,6 +1137,7 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
u32 action) u32 action)
{ {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mac_ctx_cmd cmd = {}; struct iwl_mac_ctx_cmd cmd = {};
WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p); WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p);
@ -1137,10 +1147,16 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
/* /*
* pass probe requests and beacons from other APs (needed * pass probe requests and beacons from other APs (needed
* for ht protection) * for ht protection); when no stations are associated,
* don't ask the FW to pass beacons, to prevent unnecessary wake-ups.
*/ */
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST | cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
MAC_FILTER_IN_BEACON); if (mvmvif->ap_assoc_sta_count) {
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
} else {
IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
}
/* Fill the data specific for ap mode */ /* Fill the data specific for ap mode */
iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap, iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,

View file

@ -1577,20 +1577,6 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
return NULL; return NULL;
} }
static int iwl_mvm_set_tx_power_old(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, s8 tx_power)
{
/* FW is in charge of regulatory enforcement */
struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
.mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
.pwr_restriction = cpu_to_le16(tx_power),
};
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
sizeof(reduce_txpwr_cmd),
&reduce_txpwr_cmd);
}
static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power) s16 tx_power)
{ {
@ -1602,9 +1588,6 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}; };
int len = sizeof(cmd); int len = sizeof(cmd);
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER) if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
@ -2319,6 +2302,8 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_AP) if (vif->type == NL80211_IFTYPE_AP)
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
mvmvif->ap_assoc_sta_count = 0;
/* Add the mac context */ /* Add the mac context */
ret = iwl_mvm_mac_ctxt_add(mvm, vif); ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret) if (ret)
@ -2613,6 +2598,7 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta) struct ieee80211_sta *sta)
{ {
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
/* /*
@ -2627,6 +2613,12 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
ERR_PTR(-ENOENT)); ERR_PTR(-ENOENT));
if (mvm_sta->vif->type == NL80211_IFTYPE_AP) {
mvmvif->ap_assoc_sta_count--;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
mutex_unlock(&mvm->mutex); mutex_unlock(&mvm->mutex);
} }

View file

@ -82,7 +82,6 @@
#include "constants.h" #include "constants.h"
#include "tof.h" #include "tof.h"
#define IWL_INVALID_MAC80211_QUEUE 0xff
#define IWL_MVM_MAX_ADDRESSES 5 #define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */ /* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50 #define IWL_RSSI_OFFSET 50
@ -323,11 +322,11 @@ enum iwl_bt_force_ant_mode {
struct iwl_mvm_vif_bf_data { struct iwl_mvm_vif_bf_data {
bool bf_enabled; bool bf_enabled;
bool ba_enabled; bool ba_enabled;
s8 ave_beacon_signal; int ave_beacon_signal;
s8 last_cqm_event; int last_cqm_event;
s8 bt_coex_min_thold; int bt_coex_min_thold;
s8 bt_coex_max_thold; int bt_coex_max_thold;
s8 last_bt_coex_event; int last_bt_coex_event;
}; };
/** /**
@ -338,6 +337,8 @@ struct iwl_mvm_vif_bf_data {
* @bssid: BSSID for this (client) interface * @bssid: BSSID for this (client) interface
* @associated: indicates that we're currently associated, used only for * @associated: indicates that we're currently associated, used only for
* managing the firmware state in iwl_mvm_bss_info_changed_station() * managing the firmware state in iwl_mvm_bss_info_changed_station()
* @ap_assoc_sta_count: count of stations associated to us - valid only
* if VIF type is AP
* @uploaded: indicates the MAC context has been added to the device * @uploaded: indicates the MAC context has been added to the device
* @ap_ibss_active: indicates that AP/IBSS is configured and that the interface * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
* should get quota etc. * should get quota etc.
@ -367,6 +368,7 @@ struct iwl_mvm_vif {
u8 bssid[ETH_ALEN]; u8 bssid[ETH_ALEN];
bool associated; bool associated;
u8 ap_assoc_sta_count;
bool uploaded; bool uploaded;
bool ap_ibss_active; bool ap_ibss_active;
@ -602,7 +604,14 @@ struct iwl_mvm {
u64 on_time_scan; u64 on_time_scan;
} radio_stats, accu_radio_stats; } radio_stats, accu_radio_stats;
u8 queue_to_mac80211[IWL_MAX_HW_QUEUES]; struct {
/* Map to HW queue */
u32 hw_queue_to_mac80211;
u8 hw_queue_refcount;
bool setup_reserved;
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
} queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
const char *nvm_file_name; const char *nvm_file_name;
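
Editor's note: the queue_info array added above replaces the old one-to-one queue_to_mac80211[] map with per-hardware-queue bookkeeping (a mac80211-queue bitmap plus a refcount), protected by queue_info_lock. A sketch of what mapping a mac80211 queue onto a hardware queue now looks like; the helper name is illustrative, the real logic sits in the queue-config paths:

/*
 * Illustrative only: record that a mac80211 queue is served by a given
 * hardware queue under the new per-queue bookkeeping.
 */
static void sketch_map_mac80211_queue(struct iwl_mvm *mvm, int hw_queue,
				      int mac80211_queue)
{
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[hw_queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
	mvm->queue_info[hw_queue].hw_queue_refcount++;
	spin_unlock_bh(&mvm->queue_info_lock);
}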
@ -679,6 +688,7 @@ struct iwl_mvm {
struct debugfs_blob_wrapper nvm_sw_blob; struct debugfs_blob_wrapper nvm_sw_blob;
struct debugfs_blob_wrapper nvm_calib_blob; struct debugfs_blob_wrapper nvm_calib_blob;
struct debugfs_blob_wrapper nvm_prod_blob; struct debugfs_blob_wrapper nvm_prod_blob;
struct debugfs_blob_wrapper nvm_phy_sku_blob;
struct iwl_mvm_frame_stats drv_rx_stats; struct iwl_mvm_frame_stats drv_rx_stats;
spinlock_t drv_stats_lock; spinlock_t drv_stats_lock;
@ -907,6 +917,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
} }
static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
}
static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
{ {
bool nvm_lar = mvm->nvm_data->lar_enabled; bool nvm_lar = mvm->nvm_data->lar_enabled;
@ -934,11 +950,6 @@ static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC); IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
} }
static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCD_CFG);
}
static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
{ {
return fw_has_capa(&mvm->fw->ucode_capa, return fw_has_capa(&mvm->fw->ucode_capa,
@ -959,6 +970,12 @@ static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT); IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
} }
static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
{
/* firmware flag isn't defined yet */
return false;
}
extern const u8 iwl_mvm_ac_to_tx_fifo[]; extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info { struct iwl_rate_info {
@ -1131,7 +1148,6 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif); struct ieee80211_vif *vif);
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *exclude_vif); struct ieee80211_vif *exclude_vif);
/* Bindings */ /* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@ -1344,14 +1360,20 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
} }
/* hw scheduler queue config */ /* hw scheduler queue config */
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn, void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
const struct iwl_trans_txq_scd_cfg *cfg, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout); unsigned int wdg_timeout);
void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags); /*
* Disable a TXQ.
* Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
*/
void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u8 tid, u8 flags);
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq);
static inline static inline
void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u8 fifo, unsigned int wdg_timeout) u8 fifo, u16 ssn, unsigned int wdg_timeout)
{ {
struct iwl_trans_txq_scd_cfg cfg = { struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo, .fifo = fifo,
@ -1360,13 +1382,13 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
.frame_limit = IWL_FRAME_LIMIT, .frame_limit = IWL_FRAME_LIMIT,
}; };
iwl_mvm_enable_txq(mvm, queue, 0, &cfg, wdg_timeout); iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
} }
static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue, static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
int fifo, int sta_id, int tid, int mac80211_queue, int fifo,
int frame_limit, u16 ssn, int sta_id, int tid, int frame_limit,
unsigned int wdg_timeout) u16 ssn, unsigned int wdg_timeout)
{ {
struct iwl_trans_txq_scd_cfg cfg = { struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo, .fifo = fifo,
@ -1376,7 +1398,7 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
.aggregate = true, .aggregate = true,
}; };
iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout); iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
} }
/* Thermal management and CT-kill */ /* Thermal management and CT-kill */

View file

@ -316,7 +316,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
regulatory, mac_override, phy_sku, regulatory, mac_override, phy_sku,
mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
lar_enabled, mac_addr0, mac_addr1); lar_enabled, mac_addr0, mac_addr1,
mvm->trans->hw_id);
} }
#define MAX_NVM_FILE_LEN 16384 #define MAX_NVM_FILE_LEN 16384
@ -563,6 +564,10 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
mvm->nvm_prod_blob.data = temp; mvm->nvm_prod_blob.data = temp;
mvm->nvm_prod_blob.size = ret; mvm->nvm_prod_blob.size = ret;
break; break;
case NVM_SECTION_TYPE_PHY_SKU:
mvm->nvm_phy_sku_blob.data = temp;
mvm->nvm_phy_sku_blob.size = ret;
break;
default: default:
if (section == mvm->cfg->nvm_hw_section_num) { if (section == mvm->cfg->nvm_hw_section_num) {
mvm->nvm_hw_blob.data = temp; mvm->nvm_hw_blob.data = temp;

View file

@ -89,6 +89,7 @@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
static const struct iwl_op_mode_ops iwl_mvm_ops; static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
struct iwl_mvm_mod_params iwlmvm_mod_params = { struct iwl_mvm_mod_params iwlmvm_mod_params = {
.power_scheme = IWL_POWER_SCHEME_BPS, .power_scheme = IWL_POWER_SCHEME_BPS,
@ -222,7 +223,6 @@ struct iwl_rx_handlers {
* called from a worker with mvm->mutex held. * called from a worker with mvm->mutex held.
*/ */
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false), RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false), RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
@ -257,6 +257,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
iwl_mvm_power_uapsd_misbehaving_ap_notif, false), iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true), RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
iwl_mvm_temp_notif, true),
RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
true), true),
@ -271,6 +273,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = { static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = {
CMD(MVM_ALIVE), CMD(MVM_ALIVE),
CMD(REPLY_ERROR), CMD(REPLY_ERROR),
CMD(ECHO_CMD),
CMD(INIT_COMPLETE_NOTIF), CMD(INIT_COMPLETE_NOTIF),
CMD(PHY_CONTEXT_CMD), CMD(PHY_CONTEXT_CMD),
CMD(MGMT_MCAST_KEY), CMD(MGMT_MCAST_KEY),
@ -422,7 +425,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
op_mode = hw->priv; op_mode = hw->priv;
op_mode->ops = &iwl_mvm_ops;
mvm = IWL_OP_MODE_GET_MVM(op_mode); mvm = IWL_OP_MODE_GET_MVM(op_mode);
mvm->dev = trans->dev; mvm->dev = trans->dev;
@ -431,6 +433,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->fw = fw; mvm->fw = fw;
mvm->hw = hw; mvm->hw = hw;
if (iwl_mvm_has_new_rx_api(mvm)) {
op_mode->ops = &iwl_mvm_ops_mq;
} else {
op_mode->ops = &iwl_mvm_ops;
if (WARN_ON(trans->num_rx_queues > 1))
goto out_free;
}
mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0; mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
mvm->aux_queue = 15; mvm->aux_queue = 15;
@ -451,6 +462,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_LIST_HEAD(&mvm->aux_roc_te_list); INIT_LIST_HEAD(&mvm->aux_roc_te_list);
INIT_LIST_HEAD(&mvm->async_handlers_list); INIT_LIST_HEAD(&mvm->async_handlers_list);
spin_lock_init(&mvm->time_event_lock); spin_lock_init(&mvm->time_event_lock);
spin_lock_init(&mvm->queue_info_lock);
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
@ -716,18 +728,11 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
} }
} }
static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode, static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb,
struct iwl_rx_cmd_buffer *rxb) struct iwl_rx_packet *pkt)
{ {
struct iwl_rx_packet *pkt = rxb_addr(rxb); int i;
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u8 i;
if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
return;
}
iwl_mvm_rx_check_trigger(mvm, pkt); iwl_mvm_rx_check_trigger(mvm, pkt);
@ -767,40 +772,84 @@ static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
} }
} }
static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
}
static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
}
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	int mq = mvm->queue_to_mac80211[queue];
+	unsigned long mq;
+	int q;
 
-	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+	spin_lock_bh(&mvm->queue_info_lock);
+	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	if (WARN_ON_ONCE(!mq))
 		return;
 
-	if (atomic_inc_return(&mvm->mac80211_queue_stop_count[mq]) > 1) {
-		IWL_DEBUG_TX_QUEUES(mvm,
-				    "queue %d (mac80211 %d) already stopped\n",
-				    queue, mq);
-		return;
-	}
+	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "queue %d (mac80211 %d) already stopped\n",
+					    queue, q);
+			continue;
+		}
 
-	ieee80211_stop_queue(mvm->hw, mq);
+		ieee80211_stop_queue(mvm->hw, q);
+	}
 }
 
 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	int mq = mvm->queue_to_mac80211[queue];
+	unsigned long mq;
+	int q;
 
-	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+	spin_lock_bh(&mvm->queue_info_lock);
+	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	if (WARN_ON_ONCE(!mq))
 		return;
 
-	if (atomic_dec_return(&mvm->mac80211_queue_stop_count[mq]) > 0) {
-		IWL_DEBUG_TX_QUEUES(mvm,
-				    "queue %d (mac80211 %d) still stopped\n",
-				    queue, mq);
-		return;
-	}
+	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "queue %d (mac80211 %d) still stopped\n",
+					    queue, q);
+			continue;
+		}
 
-	ieee80211_wake_queue(mvm->hw, mq);
+		ieee80211_wake_queue(mvm->hw, q);
+	}
 }
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
@ -1145,12 +1194,17 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
/* make sure we have no running tx while configuring the seqno */ /* make sure we have no running tx while configuring the seqno */
synchronize_net(); synchronize_net();
-	iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
-	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
-				   sizeof(wowlan_config_cmd),
-				   &wowlan_config_cmd);
-	if (ret)
-		return ret;
+	/* configure wowlan configuration only if needed */
+	if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
+		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
+					&d0i3_iter_data);
+
+		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
+					   sizeof(wowlan_config_cmd),
+					   &wowlan_config_cmd);
+		if (ret)
+			return ret;
+	}
return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
flags | CMD_MAKE_TRANS_IDLE, flags | CMD_MAKE_TRANS_IDLE,
@ -1257,7 +1311,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
}; };
struct iwl_wowlan_status *status; struct iwl_wowlan_status *status;
int ret; int ret;
u32 handled_reasons, wakeup_reasons; u32 handled_reasons, wakeup_reasons = 0;
__le16 *qos_seq = NULL; __le16 *qos_seq = NULL;
mutex_lock(&mvm->mutex); mutex_lock(&mvm->mutex);
@ -1289,6 +1343,9 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
out: out:
iwl_mvm_d0i3_enable_tx(mvm, qos_seq); iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
wakeup_reasons);
/* qos_seq might point inside resp_pkt, so free it only now */ /* qos_seq might point inside resp_pkt, so free it only now */
if (get_status_cmd.resp_pkt) if (get_status_cmd.resp_pkt)
iwl_free_resp(&get_status_cmd); iwl_free_resp(&get_status_cmd);
@ -1338,17 +1395,38 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
return _iwl_mvm_exit_d0i3(mvm); return _iwl_mvm_exit_d0i3(mvm);
} }
+#define IWL_MVM_COMMON_OPS					\
+	/* these could be differentiated */			\
+	.queue_full = iwl_mvm_stop_sw_queue,			\
+	.queue_not_full = iwl_mvm_wake_sw_queue,		\
+	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
+	.free_skb = iwl_mvm_free_skb,				\
+	.nic_error = iwl_mvm_nic_error,				\
+	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
+	.nic_config = iwl_mvm_nic_config,			\
+	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
+	.exit_d0i3 = iwl_mvm_exit_d0i3,				\
+	/* as we only register one, these MUST be common! */	\
+	.start = iwl_op_mode_mvm_start,				\
+	.stop = iwl_op_mode_mvm_stop
+
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
-	.start = iwl_op_mode_mvm_start,
-	.stop = iwl_op_mode_mvm_stop,
-	.rx = iwl_mvm_rx_dispatch,
-	.queue_full = iwl_mvm_stop_sw_queue,
-	.queue_not_full = iwl_mvm_wake_sw_queue,
-	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,
-	.free_skb = iwl_mvm_free_skb,
-	.nic_error = iwl_mvm_nic_error,
-	.cmd_queue_full = iwl_mvm_cmd_queue_full,
-	.nic_config = iwl_mvm_nic_config,
-	.enter_d0i3 = iwl_mvm_enter_d0i3,
-	.exit_d0i3 = iwl_mvm_exit_d0i3,
+	IWL_MVM_COMMON_OPS,
+	.rx = iwl_mvm_rx,
+};
+
+static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
+			      struct napi_struct *napi,
+			      struct iwl_rx_cmd_buffer *rxb,
+			      unsigned int queue)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+	iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+}
+
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
+	IWL_MVM_COMMON_OPS,
+	.rx = iwl_mvm_rx_mq,
+	.rx_rss = iwl_mvm_rx_mq_rss,
 };


@ -7,6 +7,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
@ -33,6 +34,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
@ -306,13 +308,50 @@ static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
return radar_detect; return radar_detect;
} }
static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd,
bool host_awake)
{
int dtimper = vif->bss_conf.dtim_period ?: 1;
int skip;
/* disable, in case we're supposed to override */
cmd->skip_dtim_periods = 0;
cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
if (iwl_mvm_power_is_radar(vif))
return;
if (dtimper >= 10)
return;
/* TODO: check that multicast wake lock is off */
if (host_awake) {
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
return;
skip = 2;
} else {
int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
if (WARN_ON(!dtimper_tu))
return;
/* configure skip over dtim up to 306TU - 314 msec */
skip = max_t(u8, 1, 306 / dtimper_tu);
}
/* the firmware really expects "look at every X DTIMs", so add 1 */
cmd->skip_dtim_periods = 1 + skip;
cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd) struct iwl_mac_power_cmd *cmd)
{ {
int dtimper, bi; int dtimper, bi;
int keep_alive; int keep_alive;
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused = struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif); iwl_mvm_vif_from_mac80211(vif);
@ -350,16 +389,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD; cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
} }
/* Check if radar detection is required on current channel */ iwl_mvm_power_config_skip_dtim(mvm, vif, cmd,
radar_detect = iwl_mvm_power_is_radar(vif); mvm->cur_ucode != IWL_UCODE_WOWLAN);
/* Check skip over DTIM conditions */
if (!radar_detect && (dtimper < 10) &&
(iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
cmd->skip_dtim_periods = 3;
}
if (mvm->cur_ucode != IWL_UCODE_WOWLAN) { if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
cmd->rx_data_timeout = cmd->rx_data_timeout =
@ -440,14 +471,14 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
 int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
 {
 	struct iwl_device_power_cmd cmd = {
-		.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
+		.flags = 0,
 	};
 
 	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
 		mvm->ps_disabled = true;
 
-	if (mvm->ps_disabled)
-		cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
+	if (!mvm->ps_disabled)
+		cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 : if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
@ -964,24 +995,11 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
return 0; return 0;
iwl_mvm_power_build_cmd(mvm, vif, &cmd); iwl_mvm_power_build_cmd(mvm, vif, &cmd);
if (enable) {
/* configure skip over dtim up to 306TU - 314 msec */
int dtimper = vif->bss_conf.dtim_period ?: 1;
int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
bool radar_detect = iwl_mvm_power_is_radar(vif);
if (WARN_ON(!dtimper_tu)) /* when enabling D0i3, override the skip-over-dtim configuration */
return 0; if (enable)
iwl_mvm_power_config_skip_dtim(mvm, vif, &cmd, false);
/* Check skip over DTIM conditions */
/* TODO: check that multicast wake lock is off */
if (!radar_detect && (dtimper < 10)) {
cmd.skip_dtim_periods = 306 / dtimper_tu;
if (cmd.skip_dtim_periods)
cmd.flags |= cpu_to_le16(
POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
}
iwl_mvm_power_log(mvm, &cmd); iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS #ifdef CONFIG_IWLWIFI_DEBUGFS
memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd)); memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));


@ -524,14 +524,56 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type)
return lq_types[type]; return lq_types[type];
} }
static char *rs_pretty_rate(const struct rs_rate *rate)
{
static char buf[40];
static const char * const legacy_rates[] = {
[IWL_RATE_1M_INDEX] = "1M",
[IWL_RATE_2M_INDEX] = "2M",
[IWL_RATE_5M_INDEX] = "5.5M",
[IWL_RATE_11M_INDEX] = "11M",
[IWL_RATE_6M_INDEX] = "6M",
[IWL_RATE_9M_INDEX] = "9M",
[IWL_RATE_12M_INDEX] = "12M",
[IWL_RATE_18M_INDEX] = "18M",
[IWL_RATE_24M_INDEX] = "24M",
[IWL_RATE_36M_INDEX] = "36M",
[IWL_RATE_48M_INDEX] = "48M",
[IWL_RATE_54M_INDEX] = "54M",
};
static const char *const ht_vht_rates[] = {
[IWL_RATE_MCS_0_INDEX] = "MCS0",
[IWL_RATE_MCS_1_INDEX] = "MCS1",
[IWL_RATE_MCS_2_INDEX] = "MCS2",
[IWL_RATE_MCS_3_INDEX] = "MCS3",
[IWL_RATE_MCS_4_INDEX] = "MCS4",
[IWL_RATE_MCS_5_INDEX] = "MCS5",
[IWL_RATE_MCS_6_INDEX] = "MCS6",
[IWL_RATE_MCS_7_INDEX] = "MCS7",
[IWL_RATE_MCS_8_INDEX] = "MCS8",
[IWL_RATE_MCS_9_INDEX] = "MCS9",
};
const char *rate_str;
if (is_type_legacy(rate->type))
rate_str = legacy_rates[rate->index];
else if (is_type_ht(rate->type) || is_type_vht(rate->type))
rate_str = ht_vht_rates[rate->index];
else
rate_str = "BAD_RATE";
sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type),
rs_pretty_ant(rate->ant), rate_str);
return buf;
}
static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate, static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
const char *prefix) const char *prefix)
{ {
IWL_DEBUG_RATE(mvm, IWL_DEBUG_RATE(mvm,
"%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d STBC: %d\n", "%s: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
prefix, rs_pretty_lq_type(rate->type), prefix, rs_pretty_rate(rate), rate->bw,
rate->index, rs_pretty_ant(rate->ant), rate->sgi, rate->ldpc, rate->stbc);
rate->bw, rate->sgi, rate->ldpc, rate->stbc);
} }
static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
@ -562,8 +604,8 @@ static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
} }
static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm, static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_data, u8 tid, struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta) struct ieee80211_sta *sta)
{ {
int ret = -EAGAIN; int ret = -EAGAIN;
@ -1485,7 +1527,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
u32 target_tpt; u32 target_tpt;
int rate_idx; int rate_idx;
if (success_ratio > IWL_MVM_RS_SR_NO_DECREASE) { if (success_ratio >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
target_tpt = 100 * expected_current_tpt; target_tpt = 100 * expected_current_tpt;
IWL_DEBUG_RATE(mvm, IWL_DEBUG_RATE(mvm,
"SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n", "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
@ -1493,7 +1535,7 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
} else { } else {
target_tpt = lq_sta->last_tpt; target_tpt = lq_sta->last_tpt;
IWL_DEBUG_RATE(mvm, IWL_DEBUG_RATE(mvm,
"SR %d not thag good. Find rate exceeding ACTUAL_TPT %d\n", "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n",
success_ratio, target_tpt); success_ratio, target_tpt);
} }
@ -1622,6 +1664,51 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false); iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
} }
static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl,
enum rs_action scale_action)
{
if (sta->bandwidth != IEEE80211_STA_RX_BW_80)
return false;
if (!is_vht_siso(&tbl->rate))
return false;
if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) &&
(tbl->rate.index == IWL_RATE_MCS_0_INDEX) &&
(scale_action == RS_ACTION_DOWNSCALE)) {
tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20;
tbl->rate.index = IWL_RATE_MCS_4_INDEX;
IWL_DEBUG_RATE(mvm, "Switch 80Mhz SISO MCS0 -> 20Mhz MCS4\n");
goto tweaked;
}
/* Go back to 80Mhz MCS1 only if we've established that 20Mhz MCS5 is
* sustainable, i.e. we're past the test window. We can't go back
* if MCS5 is just tested as this will happen always after switching
* to 20Mhz MCS4 because the rate stats are cleared.
*/
if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) &&
(((tbl->rate.index == IWL_RATE_MCS_5_INDEX) &&
(scale_action == RS_ACTION_STAY)) ||
((tbl->rate.index > IWL_RATE_MCS_5_INDEX) &&
(scale_action == RS_ACTION_UPSCALE)))) {
tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80;
tbl->rate.index = IWL_RATE_MCS_1_INDEX;
IWL_DEBUG_RATE(mvm, "Switch 20Mhz SISO MCS5 -> 80Mhz MCS1\n");
goto tweaked;
}
return false;
tweaked:
rs_set_expected_tpt_table(lq_sta, tbl);
rs_rate_scale_clear_tbl_windows(mvm, tbl);
return true;
}
static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta, struct iwl_lq_sta *lq_sta,
struct ieee80211_sta *sta, struct ieee80211_sta *sta,
@ -2174,9 +2261,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) && if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) &&
(window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) { (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) {
IWL_DEBUG_RATE(mvm, IWL_DEBUG_RATE(mvm,
"(%s: %d): Test Window: succ %d total %d\n", "%s: Test Window: succ %d total %d\n",
rs_pretty_lq_type(rate->type), rs_pretty_rate(rate),
index, window->success_counter, window->counter); window->success_counter, window->counter);
/* Can't calculate this yet; not enough history */ /* Can't calculate this yet; not enough history */
window->average_tpt = IWL_INVALID_VALUE; window->average_tpt = IWL_INVALID_VALUE;
@ -2253,8 +2340,8 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
high_tpt = tbl->win[high].average_tpt; high_tpt = tbl->win[high].average_tpt;
IWL_DEBUG_RATE(mvm, IWL_DEBUG_RATE(mvm,
"(%s: %d): cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n", "%s: cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
rs_pretty_lq_type(rate->type), index, current_tpt, sr, rs_pretty_rate(rate), current_tpt, sr,
low, high, low_tpt, high_tpt); low, high, low_tpt, high_tpt);
scale_action = rs_get_rate_action(mvm, tbl, sr, low, high, scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
@ -2305,6 +2392,8 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
/* Replace uCode's rate table for the destination station. */ /* Replace uCode's rate table for the destination station. */
if (update_lq) { if (update_lq) {
tbl->rate.index = index; tbl->rate.index = index;
if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
rs_update_rate_tbl(mvm, sta, lq_sta, tbl); rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
} }
@ -2542,7 +2631,6 @@ static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
} }
} }
rs_dump_rate(mvm, rate, "OPTIMAL RATE");
return rate; return rate;
} }


@ -202,7 +202,6 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
return -1; return -1;
stats->flag |= RX_FLAG_DECRYPTED; stats->flag |= RX_FLAG_DECRYPTED;
IWL_DEBUG_WEP(mvm, "hw decrypted CCMP successfully\n");
*crypt_len = IEEE80211_CCMP_HDR_LEN; *crypt_len = IEEE80211_CCMP_HDR_LEN;
return 0; return 0;
@ -299,13 +298,6 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
return; return;
} }
if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
phy_info->cfg_phy_cnt);
kfree_skb(skb);
return;
}
/* /*
* Keep packets with CRC errors (and with overrun) for monitor mode * Keep packets with CRC errors (and with overrun) for monitor mode
* (otherwise the firmware discards them) but mark them as bad. * (otherwise the firmware discards them) but mark them as bad.
@ -354,8 +346,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
/* This is fine since we don't support multiple AP interfaces */ /* This is fine since we don't support multiple AP interfaces */
sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
if (sta) { if (sta) {
struct iwl_mvm_sta *mvmsta; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmsta = iwl_mvm_sta_from_mac80211(sta);
rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status); rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) && if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
@ -459,7 +451,7 @@ static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
struct iwl_mvm_stat_data { struct iwl_mvm_stat_data {
struct iwl_mvm *mvm; struct iwl_mvm *mvm;
__le32 mac_id; __le32 mac_id;
__s8 beacon_filter_average_energy; u8 beacon_filter_average_energy;
struct mvm_statistics_general_v8 *general; struct mvm_statistics_general_v8 *general;
}; };
@ -577,56 +569,33 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt) struct iwl_rx_packet *pkt)
{ {
size_t v8_len = sizeof(struct iwl_notif_statistics_v8); struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
size_t v10_len = sizeof(struct iwl_notif_statistics_v10);
struct iwl_mvm_stat_data data = { struct iwl_mvm_stat_data data = {
.mvm = mvm, .mvm = mvm,
}; };
u32 temperature; u32 temperature;
if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STATS_V10)) { if (iwl_rx_packet_payload_len(pkt) != sizeof(*stats))
struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data; goto invalid;
if (iwl_rx_packet_payload_len(pkt) != v10_len) temperature = le32_to_cpu(stats->general.radio_temperature);
goto invalid; data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
stats->general.beacon_filter_average_energy;
temperature = le32_to_cpu(stats->general.radio_temperature); iwl_mvm_update_rx_statistics(mvm, &stats->rx);
data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
stats->general.beacon_filter_average_energy;
iwl_mvm_update_rx_statistics(mvm, &stats->rx); mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
mvm->radio_stats.on_time_rf =
le64_to_cpu(stats->general.on_time_rf);
mvm->radio_stats.on_time_scan =
le64_to_cpu(stats->general.on_time_scan);
mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time); data.general = &stats->general;
mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
mvm->radio_stats.on_time_rf =
le64_to_cpu(stats->general.on_time_rf);
mvm->radio_stats.on_time_scan =
le64_to_cpu(stats->general.on_time_scan);
data.general = &stats->general;
} else {
struct iwl_notif_statistics_v8 *stats = (void *)&pkt->data;
if (iwl_rx_packet_payload_len(pkt) != v8_len)
goto invalid;
temperature = le32_to_cpu(stats->general.radio_temperature);
data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =
stats->general.beacon_filter_average_energy;
iwl_mvm_update_rx_statistics(mvm, &stats->rx);
}
iwl_mvm_rx_stats_check_trigger(mvm, pkt); iwl_mvm_rx_stats_check_trigger(mvm, pkt);
/* Only handle rx statistics temperature changes if async temp
* notifications are not supported
*/
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ASYNC_DTM))
iwl_mvm_tt_temp_changed(mvm, temperature);
ieee80211_iterate_active_interfaces(mvm->hw, ieee80211_iterate_active_interfaces(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator, iwl_mvm_stat_iterator,


@ -750,8 +750,6 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
*/ */
return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
mvm->last_ebs_successful && mvm->last_ebs_successful &&
(n_iterations > 1 ||
fw_has_api(capa, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE); vif->type != NL80211_IFTYPE_P2P_DEVICE);
} }


@ -234,7 +234,9 @@ static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
/* Found a place for all queues - enable them */ /* Found a place for all queues - enable them */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac], iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
iwl_mvm_ac_to_tx_fifo[ac], wdg_timeout); mvmsta->hw_queue[ac],
iwl_mvm_ac_to_tx_fifo[ac], 0,
wdg_timeout);
mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]); mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
} }
@ -253,7 +255,7 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
/* disable the TDLS STA-specific queues */ /* disable the TDLS STA-specific queues */
sta_msk = mvmsta->tfd_queue_msk; sta_msk = mvmsta->tfd_queue_msk;
for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE) for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
iwl_mvm_disable_txq(mvm, i, 0); iwl_mvm_disable_txq(mvm, i, i, 0, 0);
} }
int iwl_mvm_add_sta(struct iwl_mvm *mvm, int iwl_mvm_add_sta(struct iwl_mvm *mvm,
@ -275,6 +277,11 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
if (sta_id == IWL_MVM_STATION_COUNT) if (sta_id == IWL_MVM_STATION_COUNT)
return -ENOSPC; return -ENOSPC;
if (vif->type == NL80211_IFTYPE_AP) {
mvmvif->ap_assoc_sta_count++;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
spin_lock_init(&mvm_sta->lock); spin_lock_init(&mvm_sta->lock);
mvm_sta->sta_id = sta_id; mvm_sta->sta_id = sta_id;
@ -287,7 +294,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
/* HW restart, don't assume the memory has been zeroed */ /* HW restart, don't assume the memory has been zeroed */
atomic_set(&mvm->pending_frames[sta_id], 0); atomic_set(&mvm->pending_frames[sta_id], 0);
mvm_sta->tid_disable_agg = 0; mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
mvm_sta->tfd_queue_msk = 0; mvm_sta->tfd_queue_msk = 0;
/* allocate new queues for a TDLS station */ /* allocate new queues for a TDLS station */
@ -467,7 +474,7 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
unsigned long i, msk = mvm->tfd_drained[sta_id]; unsigned long i, msk = mvm->tfd_drained[sta_id];
for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE) for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
iwl_mvm_disable_txq(mvm, i, 0); iwl_mvm_disable_txq(mvm, i, i, 0, 0);
mvm->tfd_drained[sta_id] = 0; mvm->tfd_drained[sta_id] = 0;
IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n", IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
@ -646,8 +653,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
/* Map Aux queue to fifo - needs to happen before adding Aux station */ /* Map Aux queue to fifo - needs to happen before adding Aux station */
iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
IWL_MVM_TX_FIFO_MCAST, wdg_timeout); IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
/* Allocate aux station and assign to it the aux queue */ /* Allocate aux station and assign to it the aux queue */
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
@ -918,6 +925,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data; struct iwl_mvm_tid_data *tid_data;
int txq_id; int txq_id;
int ret;
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
return -EINVAL; return -EINVAL;
@ -930,17 +938,6 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
for (txq_id = mvm->first_agg_queue;
txq_id <= mvm->last_agg_queue; txq_id++)
if (mvm->queue_to_mac80211[txq_id] ==
IWL_INVALID_MAC80211_QUEUE)
break;
if (txq_id > mvm->last_agg_queue) {
IWL_ERR(mvm, "Failed to allocate agg queue\n");
return -EIO;
}
spin_lock_bh(&mvmsta->lock); spin_lock_bh(&mvmsta->lock);
/* possible race condition - we entered D0i3 while starting agg */ /* possible race condition - we entered D0i3 while starting agg */
@ -950,8 +947,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO; return -EIO;
} }
/* the new tx queue is still connected to the same mac80211 queue */ spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
mvm->last_agg_queue);
if (txq_id < 0) {
ret = txq_id;
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto release_locks;
}
mvm->queue_info[txq_id].setup_reserved = true;
spin_unlock_bh(&mvm->queue_info_lock);
tid_data = &mvmsta->tid_data[tid]; tid_data = &mvmsta->tid_data[tid];
tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
@ -970,9 +977,12 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
} }
ret = 0;
release_locks:
spin_unlock_bh(&mvmsta->lock); spin_unlock_bh(&mvmsta->lock);
return 0; return ret;
} }
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@ -1000,13 +1010,19 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid, iwl_mvm_enable_agg_txq(mvm, queue,
buf_size, ssn, wdg_timeout); vif->hw_queue[tid_to_mac80211_ac[tid]], fifo,
mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout);
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret) if (ret)
return -EIO; return -EIO;
/* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].setup_reserved = false;
spin_unlock_bh(&mvm->queue_info_lock);
/* /*
* Even though in theory the peer could have different * Even though in theory the peer could have different
* aggregation reorder buffer sizes for different sessions, * aggregation reorder buffer sizes for different sessions,
@ -1051,6 +1067,11 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->agg_tids &= ~BIT(tid); mvmsta->agg_tids &= ~BIT(tid);
/* No need to mark as reserved anymore */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[txq_id].setup_reserved = false;
spin_unlock_bh(&mvm->queue_info_lock);
switch (tid_data->state) { switch (tid_data->state) {
case IWL_AGG_ON: case IWL_AGG_ON:
tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
@ -1068,14 +1089,15 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
tid_data->ssn = 0xffff; tid_data->ssn = 0xffff;
tid_data->state = IWL_AGG_OFF; tid_data->state = IWL_AGG_OFF;
mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
spin_unlock_bh(&mvmsta->lock); spin_unlock_bh(&mvmsta->lock);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
iwl_mvm_disable_txq(mvm, txq_id, 0); iwl_mvm_disable_txq(mvm, txq_id,
vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
0);
return 0; return 0;
case IWL_AGG_STARTING: case IWL_AGG_STARTING:
case IWL_EMPTYING_HW_QUEUE_ADDBA: case IWL_EMPTYING_HW_QUEUE_ADDBA:
@ -1086,7 +1108,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* No barriers since we are under mutex */ /* No barriers since we are under mutex */
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
tid_data->state = IWL_AGG_OFF; tid_data->state = IWL_AGG_OFF;
@ -1127,6 +1148,11 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->agg_tids &= ~BIT(tid); mvmsta->agg_tids &= ~BIT(tid);
spin_unlock_bh(&mvmsta->lock); spin_unlock_bh(&mvmsta->lock);
/* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[txq_id].setup_reserved = false;
spin_unlock_bh(&mvm->queue_info_lock);
if (old_state >= IWL_AGG_ON) { if (old_state >= IWL_AGG_ON) {
iwl_mvm_drain_sta(mvm, mvmsta, true); iwl_mvm_drain_sta(mvm, mvmsta, true);
if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true)) if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
@ -1137,12 +1163,11 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
iwl_mvm_disable_txq(mvm, tid_data->txq_id, 0); iwl_mvm_disable_txq(mvm, tid_data->txq_id,
vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
0);
} }
mvm->queue_to_mac80211[tid_data->txq_id] =
IWL_INVALID_MAC80211_QUEUE;
return 0; return 0;
} }


@ -178,12 +178,14 @@ int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
return -EINVAL; return -EINVAL;
if (vif->p2p || vif->type != NL80211_IFTYPE_AP) { if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
!mvmvif->ap_ibss_active) {
IWL_ERR(mvm, "Cannot start responder, not in AP mode\n"); IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
return -EIO; return -EIO;
} }
cmd->sta_id = mvmvif->bcast_sta.sta_id; cmd->sta_id = mvmvif->bcast_sta.sta_id;
memcpy(cmd->bssid, vif->addr, ETH_ALEN);
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
IWL_ALWAYS_LONG_GROUP, 0), IWL_ALWAYS_LONG_GROUP, 0),
0, sizeof(*cmd), cmd); 0, sizeof(*cmd), cmd);


@ -60,7 +60,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* *
*****************************************************************************/ *****************************************************************************/
#ifndef __tof #ifndef __tof_h__
#define __tof_h__ #define __tof_h__
#include "fw-api-tof.h" #include "fw-api-tof.h"


@ -176,17 +176,27 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
struct iwl_dts_measurement_cmd cmd = { struct iwl_dts_measurement_cmd cmd = {
.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP), .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
}; };
u32 cmdid;
return iwl_mvm_send_cmd_pdu(mvm, CMD_DTS_MEASUREMENT_TRIGGER, 0, if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
PHY_OPS_GROUP, 0);
else
cmdid = CMD_DTS_MEASUREMENT_TRIGGER;
return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0,
sizeof(cmd), &cmd); sizeof(cmd), &cmd);
} }
int iwl_mvm_get_temp(struct iwl_mvm *mvm) int iwl_mvm_get_temp(struct iwl_mvm *mvm)
{ {
struct iwl_notification_wait wait_temp_notif; struct iwl_notification_wait wait_temp_notif;
static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION }; static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
DTS_MEASUREMENT_NOTIF_WIDE) };
int ret, temp; int ret, temp;
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION;
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,


@ -560,15 +560,10 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm, IWL_DEBUG_TX_QUEUES(mvm,
"Can continue DELBA flow ssn = next_recl = %d\n", "Can continue DELBA flow ssn = next_recl = %d\n",
tid_data->next_reclaimed); tid_data->next_reclaimed);
iwl_mvm_disable_txq(mvm, tid_data->txq_id, CMD_ASYNC); iwl_mvm_disable_txq(mvm, tid_data->txq_id,
vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
CMD_ASYNC);
tid_data->state = IWL_AGG_OFF; tid_data->state = IWL_AGG_OFF;
/*
* we can't hold the mutex - but since we are after a sequence
* point (call to iwl_mvm_disable_txq(), so we don't even need
* a memory barrier.
*/
mvm->queue_to_mac80211[tid_data->txq_id] =
IWL_INVALID_MAC80211_QUEUE;
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break; break;


@ -7,6 +7,7 @@
* *
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as * it under the terms of version 2 of the GNU General Public License as
@ -657,45 +658,143 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
if (mvm->support_umac_log) if (mvm->support_umac_log)
iwl_mvm_dump_umac_error_log(mvm); iwl_mvm_dump_umac_error_log(mvm);
} }
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
+{
+	int i;
+
+	lockdep_assert_held(&mvm->queue_info_lock);
+
+	for (i = minq; i <= maxq; i++)
+		if (mvm->queue_info[i].hw_queue_refcount == 0 &&
+		    !mvm->queue_info[i].setup_reserved)
+			return i;
+
+	return -ENOSPC;
+}
+
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
-			const struct iwl_trans_txq_scd_cfg *cfg,
+void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
 			unsigned int wdg_timeout)
 {
-	struct iwl_scd_txq_cfg_cmd cmd = {
-		.scd_queue = queue,
-		.enable = 1,
-		.window = cfg->frame_limit,
-		.sta_id = cfg->sta_id,
-		.ssn = cpu_to_le16(ssn),
-		.tx_fifo = cfg->fifo,
-		.aggregate = cfg->aggregate,
-		.tid = cfg->tid,
-	};
-
-	if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
-		iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, cfg,
-					 wdg_timeout);
+	bool enable_queue = true;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/* Make sure this TID isn't already enabled */
+	if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n",
+			cfg->tid);
 		return;
 	}
 
-	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
-	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
-	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+	/* Update mappings and refcounts */
+	mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
+	mvm->queue_info[queue].hw_queue_refcount++;
+	if (mvm->queue_info[queue].hw_queue_refcount > 1)
+		enable_queue = false;
+	mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
+			    queue, mvm->queue_info[queue].hw_queue_refcount,
+			    mvm->queue_info[queue].hw_queue_to_mac80211);
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* Send the enabling command if we need to */
+	if (enable_queue) {
+		struct iwl_scd_txq_cfg_cmd cmd = {
+			.scd_queue = queue,
+			.enable = 1,
+			.window = cfg->frame_limit,
+			.sta_id = cfg->sta_id,
+			.ssn = cpu_to_le16(ssn),
+			.tx_fifo = cfg->fifo,
+			.aggregate = cfg->aggregate,
+			.tid = cfg->tid,
+		};
+
+		iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
+					 wdg_timeout);
+		WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
+					  &cmd),
+		     "Failed to configure queue %d on FIFO %d\n", queue,
+		     cfg->fifo);
+	}
 }
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags)
+void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			 u8 tid, u8 flags)
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
 		.enable = 0,
 	};
+	bool remove_mac_queue = true;
 	int ret;
 
-	if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
-		iwl_trans_txq_disable(mvm->trans, queue, true);
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
+		spin_unlock_bh(&mvm->queue_info_lock);
 		return;
 	}
 
+	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+	/*
+	 * If there is another TID with the same AC - don't remove the MAC queue
+	 * from the mapping
+	 */
+	if (tid < IWL_MAX_TID_COUNT) {
+		unsigned long tid_bitmap =
+			mvm->queue_info[queue].tid_bitmap;
+		int ac = tid_to_mac80211_ac[tid];
+		int i;
+
+		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+			if (tid_to_mac80211_ac[i] == ac)
+				remove_mac_queue = false;
+		}
+	}
+
+	if (remove_mac_queue)
+		mvm->queue_info[queue].hw_queue_to_mac80211 &=
+			~BIT(mac80211_queue);
+	mvm->queue_info[queue].hw_queue_refcount--;
+
+	cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
+			    queue,
+			    mvm->queue_info[queue].hw_queue_refcount,
+			    mvm->queue_info[queue].hw_queue_to_mac80211);
+
+	/* If the queue is still enabled - nothing left to do in this func */
+	if (cmd.enable) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		return;
+	}
+
+	/* Make sure queue info is correct even though we overwrite it */
+	WARN(mvm->queue_info[queue].hw_queue_refcount ||
+	     mvm->queue_info[queue].tid_bitmap ||
+	     mvm->queue_info[queue].hw_queue_to_mac80211,
+	     "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
+	     queue, mvm->queue_info[queue].hw_queue_refcount,
+	     mvm->queue_info[queue].hw_queue_to_mac80211,
+	     mvm->queue_info[queue].tid_bitmap);
+
+	/* If we are here - the queue is freed and we can zero out these vals */
+	mvm->queue_info[queue].hw_queue_refcount = 0;
+	mvm->queue_info[queue].tid_bitmap = 0;
+	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	iwl_trans_txq_disable(mvm->trans, queue, false);
 	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
 				   sizeof(cmd), &cmd);


@ -201,6 +201,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_AGGR_PKT; tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_AGGR_PKT;
skb_aggr->priority = skb_src->priority; skb_aggr->priority = skb_src->priority;
skb_aggr->tstamp = skb_src->tstamp;
skb_aggr->tstamp = ktime_get_real(); skb_aggr->tstamp = ktime_get_real();
@ -256,8 +257,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
} }
if (adapter->iface_type == MWIFIEX_USB) { if (adapter->iface_type == MWIFIEX_USB) {
adapter->data_sent = true; ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
skb_aggr, NULL); skb_aggr, NULL);
} else { } else {
if (skb_src) if (skb_src)
@ -297,16 +297,12 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n"); mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
break; break;
case -1: case -1:
if (adapter->iface_type != MWIFIEX_PCIE)
adapter->data_sent = false;
mwifiex_dbg(adapter, ERROR, "%s: host_to_card failed: %#x\n", mwifiex_dbg(adapter, ERROR, "%s: host_to_card failed: %#x\n",
__func__, ret); __func__, ret);
adapter->dbg.num_tx_host_to_card_failure++; adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb_aggr, 1, ret); mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
return 0; return 0;
case -EINPROGRESS: case -EINPROGRESS:
if (adapter->iface_type != MWIFIEX_PCIE)
adapter->data_sent = false;
break; break;
case 0: case 0:
mwifiex_write_data_complete(adapter, skb_aggr, 1, ret); mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);


@ -1994,8 +1994,10 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
CFG80211_BSS_FTYPE_UNKNOWN, CFG80211_BSS_FTYPE_UNKNOWN,
bss_info.bssid, 0, WLAN_CAPABILITY_IBSS, bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
0, ie_buf, ie_len, 0, GFP_KERNEL); 0, ie_buf, ie_len, 0, GFP_KERNEL);
cfg80211_put_bss(priv->wdev.wiphy, bss); if (bss) {
memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN); cfg80211_put_bss(priv->wdev.wiphy, bss);
ether_addr_copy(priv->cfg_bssid, bss_info.bssid);
}
return 0; return 0;
} }
@ -2859,14 +2861,14 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 	case NL80211_IFTYPE_UNSPECIFIED:
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_ADHOC:
-		adapter->curr_iface_comb.sta_intf++;
+		adapter->curr_iface_comb.sta_intf--;
 		break;
 	case NL80211_IFTYPE_AP:
-		adapter->curr_iface_comb.uap_intf++;
+		adapter->curr_iface_comb.uap_intf--;
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_P2P_GO:
-		adapter->curr_iface_comb.p2p_intf++;
+		adapter->curr_iface_comb.p2p_intf--;
 		break;
default: default:
mwifiex_dbg(adapter, ERROR, mwifiex_dbg(adapter, ERROR,


@ -731,7 +731,7 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
(struct mwifiex_private *) file->private_data; (struct mwifiex_private *) file->private_data;
unsigned long addr = get_zeroed_page(GFP_KERNEL); unsigned long addr = get_zeroed_page(GFP_KERNEL);
char *buf = (char *) addr; char *buf = (char *) addr;
-	int pos = 0, ret = 0, i;
+	int pos, ret, i;
u8 value[MAX_EEPROM_DATA]; u8 value[MAX_EEPROM_DATA];
if (!buf) if (!buf)
@ -739,7 +739,7 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
if (saved_offset == -1) { if (saved_offset == -1) {
/* No command has been given */ /* No command has been given */
-		pos += snprintf(buf, PAGE_SIZE, "0");
+		pos = snprintf(buf, PAGE_SIZE, "0");
goto done; goto done;
} }
@ -748,17 +748,17 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
(u16) saved_bytes, value); (u16) saved_bytes, value);
if (ret) { if (ret) {
ret = -EINVAL; ret = -EINVAL;
-		goto done;
+		goto out_free;
 	}
 
-	pos += snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
+	pos = snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
 	for (i = 0; i < saved_bytes; i++)
-		pos += snprintf(buf + strlen(buf), PAGE_SIZE, "%d ", value[i]);
-
-	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+		pos += scnprintf(buf + pos, PAGE_SIZE - pos, "%d ", value[i]);
 
 done:
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+out_free:
free_page(addr); free_page(addr);
return ret; return ret;
} }


@ -104,6 +104,7 @@ enum KEY_TYPE_ID {
enum mwifiex_usb_ep { enum mwifiex_usb_ep {
MWIFIEX_USB_EP_CMD_EVENT = 1, MWIFIEX_USB_EP_CMD_EVENT = 1,
MWIFIEX_USB_EP_DATA = 2, MWIFIEX_USB_EP_DATA = 2,
MWIFIEX_USB_EP_DATA_CH2 = 3,
}; };
enum MWIFIEX_802_11_PRIVACY_FILTER { enum MWIFIEX_802_11_PRIVACY_FILTER {
@ -173,6 +174,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154) #define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156) #define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
#define TLV_TYPE_MULTI_CHAN_INFO (PROPRIETARY_TLV_BASE_ID + 183) #define TLV_TYPE_MULTI_CHAN_INFO (PROPRIETARY_TLV_BASE_ID + 183)
#define TLV_TYPE_MC_GROUP_INFO (PROPRIETARY_TLV_BASE_ID + 184)
#define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194) #define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194)
#define TLV_TYPE_SCAN_CHANNEL_GAP (PROPRIETARY_TLV_BASE_ID + 197) #define TLV_TYPE_SCAN_CHANNEL_GAP (PROPRIETARY_TLV_BASE_ID + 197)
#define TLV_TYPE_API_REV (PROPRIETARY_TLV_BASE_ID + 199) #define TLV_TYPE_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
@ -1984,6 +1986,22 @@ struct mwifiex_ie_types_multi_chan_info {
u8 tlv_buffer[0]; u8 tlv_buffer[0];
} __packed; } __packed;
struct mwifiex_ie_types_mc_group_info {
struct mwifiex_ie_types_header header;
u8 chan_group_id;
u8 chan_buf_weight;
u8 band_config;
u8 chan_num;
u32 chan_time;
u32 reserved;
union {
u8 sdio_func_num;
u8 usb_ep_num;
} hid_num;
u8 intf_num;
u8 bss_type_numlist[0];
} __packed;
struct meas_rpt_map { struct meas_rpt_map {
u8 rssi:3; u8 rssi:3;
u8 unmeasured:1; u8 unmeasured:1;


@ -78,6 +78,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->media_connected = false; priv->media_connected = false;
eth_broadcast_addr(priv->curr_addr); eth_broadcast_addr(priv->curr_addr);
priv->port_open = false; priv->port_open = false;
priv->usb_port = MWIFIEX_USB_EP_DATA;
priv->pkt_tx_ctrl = 0; priv->pkt_tx_ctrl = 0;
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->data_rate = 0; /* Initially indicate the rate as auto */ priv->data_rate = 0; /* Initially indicate the rate as auto */


@ -294,9 +294,15 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
/* We have tried to wakeup the card already */ /* We have tried to wakeup the card already */
if (adapter->pm_wakeup_fw_try) if (adapter->pm_wakeup_fw_try)
break; break;
if (adapter->ps_state != PS_STATE_AWAKE || if (adapter->ps_state != PS_STATE_AWAKE)
adapter->tx_lock_flag)
break; break;
if (adapter->tx_lock_flag) {
if (adapter->iface_type == MWIFIEX_USB) {
if (!adapter->usb_mc_setup)
break;
} else
break;
}
if ((!adapter->scan_chan_gap_enabled && if ((!adapter->scan_chan_gap_enabled &&
adapter->scan_processing) || adapter->data_sent || adapter->scan_processing) || adapter->data_sent ||
@ -345,11 +351,18 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
*/ */
if ((adapter->ps_state == PS_STATE_SLEEP) || if ((adapter->ps_state == PS_STATE_SLEEP) ||
(adapter->ps_state == PS_STATE_PRE_SLEEP) || (adapter->ps_state == PS_STATE_PRE_SLEEP) ||
(adapter->ps_state == PS_STATE_SLEEP_CFM) || (adapter->ps_state == PS_STATE_SLEEP_CFM)) {
adapter->tx_lock_flag){
continue; continue;
} }
if (adapter->tx_lock_flag) {
if (adapter->iface_type == MWIFIEX_USB) {
if (!adapter->usb_mc_setup)
continue;
} else
continue;
}
if (!adapter->cmd_sent && !adapter->curr_cmd && if (!adapter->cmd_sent && !adapter->curr_cmd &&
mwifiex_is_send_cmd_allowed mwifiex_is_send_cmd_allowed
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
@ -359,6 +372,13 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
} }
} }
/** If USB Multi channel setup ongoing,
* wait for ready to tx data.
*/
if (adapter->iface_type == MWIFIEX_USB &&
adapter->usb_mc_setup)
continue;
if ((adapter->scan_chan_gap_enabled || if ((adapter->scan_chan_gap_enabled ||
!adapter->scan_processing) && !adapter->scan_processing) &&
!adapter->data_sent && !adapter->data_sent &&
@ -928,6 +948,32 @@ mwifiex_tx_timeout(struct net_device *dev)
} }
} }
void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter)
{
struct usb_card_rec *card = adapter->card;
struct mwifiex_private *priv;
u16 tx_buf_size;
int i, ret;
card->mc_resync_flag = true;
for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
if (atomic_read(&card->port[i].tx_data_urb_pending)) {
mwifiex_dbg(adapter, WARN, "pending data urb in sys\n");
return;
}
}
card->mc_resync_flag = false;
tx_buf_size = 0xffff;
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
ret = mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
HostCmd_ACT_GEN_SET, 0, &tx_buf_size, false);
if (ret)
mwifiex_dbg(adapter, ERROR,
"send reconfig tx buf size cmd err\n");
}
EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync);
void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
{ {
void *p; void *p;
@ -963,8 +1009,10 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
cardp = (struct usb_card_rec *)adapter->card; cardp = (struct usb_card_rec *)adapter->card;
p += sprintf(p, "tx_cmd_urb_pending = %d\n", p += sprintf(p, "tx_cmd_urb_pending = %d\n",
atomic_read(&cardp->tx_cmd_urb_pending)); atomic_read(&cardp->tx_cmd_urb_pending));
p += sprintf(p, "tx_data_urb_pending = %d\n", p += sprintf(p, "tx_data_urb_pending_port_0 = %d\n",
atomic_read(&cardp->tx_data_urb_pending)); atomic_read(&cardp->port[0].tx_data_urb_pending));
p += sprintf(p, "tx_data_urb_pending_port_1 = %d\n",
atomic_read(&cardp->port[1].tx_data_urb_pending));
p += sprintf(p, "rx_cmd_urb_pending = %d\n", p += sprintf(p, "rx_cmd_urb_pending = %d\n",
atomic_read(&cardp->rx_cmd_urb_pending)); atomic_read(&cardp->rx_cmd_urb_pending));
p += sprintf(p, "rx_data_urb_pending = %d\n", p += sprintf(p, "rx_data_urb_pending = %d\n",
@ -1447,6 +1495,26 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
} }
EXPORT_SYMBOL_GPL(mwifiex_remove_card); EXPORT_SYMBOL_GPL(mwifiex_remove_card);
void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if (!adapter->dev || !(adapter->debug_mask & mask))
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
dev_info(adapter->dev, "%pV", &vaf);
va_end(args);
}
EXPORT_SYMBOL_GPL(_mwifiex_dbg);
/* /*
* This function initializes the module. * This function initializes the module.
* *


@ -48,6 +48,9 @@
extern const char driver_version[]; extern const char driver_version[];
struct mwifiex_adapter;
struct mwifiex_private;
enum { enum {
MWIFIEX_ASYNC_CMD, MWIFIEX_ASYNC_CMD,
MWIFIEX_SYNC_CMD MWIFIEX_SYNC_CMD
@ -180,12 +183,11 @@ enum MWIFIEX_DEBUG_LEVEL {
MWIFIEX_DBG_FATAL | \ MWIFIEX_DBG_FATAL | \
MWIFIEX_DBG_ERROR) MWIFIEX_DBG_ERROR)
-#define mwifiex_dbg(adapter, dbg_mask, fmt, args...) \
-	do { \
-		if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask) \
-			if ((adapter)->dev) \
-				dev_info((adapter)->dev, fmt, ## args); \
-	} while (0)
+__printf(3, 4)
+void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask,
+		  const char *fmt, ...);
+#define mwifiex_dbg(adapter, mask, fmt, ...) \
+	_mwifiex_dbg(adapter, MWIFIEX_DBG_##mask, fmt, ##__VA_ARGS__)
#define DEBUG_DUMP_DATA_MAX_LEN 128 #define DEBUG_DUMP_DATA_MAX_LEN 128
#define mwifiex_dbg_dump(adapter, dbg_mask, str, buf, len) \ #define mwifiex_dbg_dump(adapter, dbg_mask, str, buf, len) \
@ -506,9 +508,6 @@ enum mwifiex_iface_work_flags {
MWIFIEX_IFACE_WORK_CARD_RESET, MWIFIEX_IFACE_WORK_CARD_RESET,
}; };
struct mwifiex_adapter;
struct mwifiex_private;
struct mwifiex_private { struct mwifiex_private {
struct mwifiex_adapter *adapter; struct mwifiex_adapter *adapter;
u8 bss_type; u8 bss_type;
@ -520,6 +519,7 @@ struct mwifiex_private {
u8 curr_addr[ETH_ALEN]; u8 curr_addr[ETH_ALEN];
u8 media_connected; u8 media_connected;
u8 port_open; u8 port_open;
u8 usb_port;
u32 num_tx_timeout; u32 num_tx_timeout;
/* track consecutive timeout */ /* track consecutive timeout */
u8 tx_timeout_cnt; u8 tx_timeout_cnt;
@ -816,6 +816,8 @@ struct mwifiex_if_ops {
void (*iface_work)(struct work_struct *work); void (*iface_work)(struct work_struct *work);
void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter); void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *); void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *);
void (*multi_port_resync)(struct mwifiex_adapter *);
bool (*is_port_ready)(struct mwifiex_private *);
}; };
struct mwifiex_adapter { struct mwifiex_adapter {
@ -861,6 +863,8 @@ struct mwifiex_adapter {
u8 more_task_flag; u8 more_task_flag;
u16 tx_buf_size; u16 tx_buf_size;
u16 curr_tx_buf_size; u16 curr_tx_buf_size;
/* sdio single port rx aggregation capability */
bool host_disable_sdio_rx_aggr;
bool sdio_rx_aggr_enable; bool sdio_rx_aggr_enable;
u16 sdio_rx_block_size; u16 sdio_rx_block_size;
u32 ioport; u32 ioport;
@ -988,6 +992,8 @@ struct mwifiex_adapter {
u8 coex_rx_win_size; u8 coex_rx_win_size;
bool drcs_enabled; bool drcs_enabled;
u8 active_scan_triggered; u8 active_scan_triggered;
bool usb_mc_status;
bool usb_mc_setup;
}; };
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@ -1561,6 +1567,7 @@ void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
struct sk_buff *event); struct sk_buff *event);
void mwifiex_process_multi_chan_event(struct mwifiex_private *priv, void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
struct sk_buff *event_skb); struct sk_buff *event_skb);
void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter);
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void); void mwifiex_debugfs_init(void);


@ -1815,7 +1815,6 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
if (!card->evt_buf_list[rdptr]) { if (!card->evt_buf_list[rdptr]) {
skb_push(skb, INTF_HEADER_LEN); skb_push(skb, INTF_HEADER_LEN);
skb_put(skb, MAX_EVENT_SIZE - skb->len); skb_put(skb, MAX_EVENT_SIZE - skb->len);
memset(skb->data, 0, MAX_EVENT_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, if (mwifiex_map_pci_memory(adapter, skb,
MAX_EVENT_SIZE, MAX_EVENT_SIZE,
PCI_DMA_FROMDEVICE)) PCI_DMA_FROMDEVICE))


@@ -1839,14 +1839,18 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
 					    bssid, timestamp,
 					    cap_info_bitmap, beacon_period,
 					    ie_buf, ie_len, rssi, GFP_KERNEL);
-		bss_priv = (struct mwifiex_bss_priv *)bss->priv;
-		bss_priv->band = band;
-		bss_priv->fw_tsf = fw_tsf;
-		if (priv->media_connected &&
-		    !memcmp(bssid, priv->curr_bss_params.bss_descriptor
-			    .mac_address, ETH_ALEN))
-			mwifiex_update_curr_bss_params(priv, bss);
-		cfg80211_put_bss(priv->wdev.wiphy, bss);
+		if (bss) {
+			bss_priv = (struct mwifiex_bss_priv *)bss->priv;
+			bss_priv->band = band;
+			bss_priv->fw_tsf = fw_tsf;
+			if (priv->media_connected &&
+			    !memcmp(bssid, priv->curr_bss_params.
+				    bss_descriptor.mac_address,
+				    ETH_ALEN))
+				mwifiex_update_curr_bss_params(priv,
+							       bss);
+			cfg80211_put_bss(priv->wdev.wiphy, bss);
+		}
 
 	if ((chan->flags & IEEE80211_CHAN_RADAR) ||
 	    (chan->flags & IEEE80211_CHAN_NO_IR)) {
@@ -1889,7 +1893,7 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
 	u8 id = 0;
 	struct mwifiex_user_scan_cfg *user_scan_cfg;
 
-	if (adapter->active_scan_triggered) {
+	if (adapter->active_scan_triggered || !priv->scan_request) {
 		adapter->active_scan_triggered = false;
 		return 0;
 	}
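The first scan.c hunk wraps every use of the returned bss pointer in an if (bss) block, since the helper that registers the scan result can fail and return NULL. Below is a minimal sketch of that guarded-pointer pattern; the types and helpers are stand-ins for illustration, not the real cfg80211 API.

/*
 * Sketch of the NULL-guard pattern introduced in the scan.c hunk:
 * only dereference and release the result when the helper succeeded.
 */
#include <stdio.h>
#include <stdlib.h>

struct bss {
	int band;
	long long fw_tsf;
};

/* Stand-in for the registration helper: may fail and return NULL. */
static struct bss *inform_bss(void)
{
	return malloc(sizeof(struct bss));
}

/* Stand-in for releasing the reference. */
static void put_bss(struct bss *bss)
{
	free(bss);
}

int main(void)
{
	struct bss *bss = inform_bss();

	if (bss) {			/* touch the result only when non-NULL */
		bss->band = 2;
		bss->fw_tsf = 123456789LL;
		printf("band=%d tsf=%lld\n", bss->band, bss->fw_tsf);
		put_bss(bss);
	}
	return 0;
}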

View file

@@ -1606,8 +1606,9 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
 				       (rx_len + MWIFIEX_SDIO_BLOCK_SIZE -
 					1) / MWIFIEX_SDIO_BLOCK_SIZE;
 				if (rx_len <= INTF_HEADER_LEN ||
-				    (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
-				    card->mpa_rx.buf_size) {
+				    (card->mpa_rx.enabled &&
+				     ((rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
+				      card->mpa_rx.buf_size))) {
 					mwifiex_dbg(adapter, ERROR,
 						    "invalid rx_len=%d\n",
 						    rx_len);
@@ -1925,6 +1926,8 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
 	if (ret) {
 		kfree(card->mpa_tx.buf);
 		kfree(card->mpa_rx.buf);
+		card->mpa_tx.buf_size = 0;
+		card->mpa_rx.buf_size = 0;
 	}
 
 	return ret;
@@ -2055,16 +2058,26 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
 	ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
 					     card->mp_tx_agg_buf_size,
 					     card->mp_rx_agg_buf_size);
-	if (ret) {
-		mwifiex_dbg(adapter, ERROR,
-			    "failed to alloc sdio mp-a buffers\n");
-		kfree(card->mp_regs);
-		return -1;
+
+	/* Allocate 32k MPA Tx/Rx buffers if 64k memory allocation fails */
+	if (ret && (card->mp_tx_agg_buf_size == MWIFIEX_MP_AGGR_BUF_SIZE_MAX ||
+		    card->mp_rx_agg_buf_size == MWIFIEX_MP_AGGR_BUF_SIZE_MAX)) {
+		/* Disable rx single port aggregation */
+		adapter->host_disable_sdio_rx_aggr = true;
+
+		ret = mwifiex_alloc_sdio_mpa_buffers
+			(adapter, MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+			 MWIFIEX_MP_AGGR_BUF_SIZE_32K);
+		if (ret) {
+			/* Disable multi port aggregation */
+			card->mpa_tx.enabled = 0;
+			card->mpa_rx.enabled = 0;
+		}
 	}
 
 	adapter->auto_tdls = card->can_auto_tdls;
 	adapter->ext_scan = card->can_ext_scan;
-	return ret;
+	return 0;
 }
 
 /*
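The mwifiex_init_sdio() hunk above replaces the hard failure on buffer allocation with a two-stage fallback: retry with 32K aggregation buffers while disabling single-port RX aggregation, and only if that also fails turn multi-port aggregation off. Below is a userspace sketch of that retry logic; the sizes, struct layout, and helper are illustrative stand-ins, not the driver's definitions.

/*
 * Sketch of the new allocation fallback: full-size buffers first, then
 * 32K buffers, then give up on multi-port aggregation entirely.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define AGGR_BUF_SIZE_MAX	(64 * 1024)	/* illustrative */
#define AGGR_BUF_SIZE_32K	(32 * 1024)

struct aggr_port {
	unsigned char *buf;
	size_t buf_size;
	bool enabled;
};

static int alloc_mpa_buffers(struct aggr_port *tx, struct aggr_port *rx,
			     size_t tx_size, size_t rx_size)
{
	tx->buf = malloc(tx_size);
	rx->buf = malloc(rx_size);
	if (!tx->buf || !rx->buf) {
		free(tx->buf);
		free(rx->buf);
		tx->buf = rx->buf = NULL;
		tx->buf_size = rx->buf_size = 0;	/* mirrors the new zeroing */
		return -1;
	}
	tx->buf_size = tx_size;
	rx->buf_size = rx_size;
	return 0;
}

int main(void)
{
	struct aggr_port mpa_tx = { .enabled = true };
	struct aggr_port mpa_rx = { .enabled = true };
	bool host_disable_sdio_rx_aggr = false;
	int ret;

	ret = alloc_mpa_buffers(&mpa_tx, &mpa_rx,
				AGGR_BUF_SIZE_MAX, AGGR_BUF_SIZE_MAX);
	if (ret) {
		/* Retry with 32K buffers; single-port RX aggregation is off. */
		host_disable_sdio_rx_aggr = true;
		ret = alloc_mpa_buffers(&mpa_tx, &mpa_rx,
					AGGR_BUF_SIZE_32K, AGGR_BUF_SIZE_32K);
		if (ret) {
			/* Give up on multi-port aggregation altogether. */
			mpa_tx.enabled = false;
			mpa_rx.enabled = false;
		}
	}
	printf("rx aggr buf %zu bytes, mpa %s, sp-rx-aggr %s\n",
	       mpa_rx.buf_size, mpa_rx.enabled ? "on" : "off",
	       host_disable_sdio_rx_aggr ? "disabled" : "enabled");
	return 0;
}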

View file

@@ -2125,7 +2125,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
 	/** Set SDIO Single Port RX Aggr Info */
 	if (priv->adapter->iface_type == MWIFIEX_SDIO &&
-	    ISSUPP_SDIO_SPA_ENABLED(priv->adapter->fw_cap_info)) {
+	    ISSUPP_SDIO_SPA_ENABLED(priv->adapter->fw_cap_info) &&
+	    !priv->adapter->host_disable_sdio_rx_aggr) {
 		sdio_sp_rx_aggr_enable = true;
 		ret = mwifiex_send_cmd(priv,
 				       HostCmd_CMD_SDIO_SP_RX_AGGR_CFG,
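The sta_cmd.c hunk makes host_disable_sdio_rx_aggr part of the gate for HostCmd_CMD_SDIO_SP_RX_AGGR_CFG: the command is only issued when the firmware advertises single-port aggregation and the host did not fall back to 32K buffers at init time. A small sketch of that condition follows; the capability bit value is an assumption for illustration, not the real fw_cap_info layout.

/*
 * Sketch of the updated enable condition for SDIO single-port RX
 * aggregation: firmware capability AND no host-side fallback.
 */
#include <stdbool.h>
#include <stdio.h>

#define IFACE_SDIO		1
#define FW_CAP_SDIO_SPA		(1u << 6)	/* assumed bit, illustration only */
#define ISSUPP_SDIO_SPA_ENABLED(cap)	((cap) & FW_CAP_SDIO_SPA)

int main(void)
{
	unsigned int iface_type = IFACE_SDIO;
	unsigned int fw_cap_info = FW_CAP_SDIO_SPA;
	bool host_disable_sdio_rx_aggr = false;	/* set by the 32K fallback */
	bool sdio_sp_rx_aggr_enable;

	sdio_sp_rx_aggr_enable = iface_type == IFACE_SDIO &&
				 ISSUPP_SDIO_SPA_ENABLED(fw_cap_info) &&
				 !host_disable_sdio_rx_aggr;

	printf("SDIO single-port RX aggregation: %s\n",
	       sdio_sp_rx_aggr_enable ? "enabled" : "disabled");
	return 0;
}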

Some files were not shown because too many files have changed in this diff.