commit 7eab0f64a9
Merge branch 'master' into for-davem

Conflicts:
	drivers/net/wireless/iwlwifi/iwl-testmode.c
	net/wireless/nl80211.c
@@ -516,7 +516,7 @@
 !Finclude/net/mac80211.h ieee80211_start_tx_ba_cb_irqsafe
 !Finclude/net/mac80211.h ieee80211_stop_tx_ba_session
 !Finclude/net/mac80211.h ieee80211_stop_tx_ba_cb_irqsafe
-!Finclude/net/mac80211.h rate_control_changed
+!Finclude/net/mac80211.h ieee80211_rate_control_changed
 !Finclude/net/mac80211.h ieee80211_tx_rate_control
 !Finclude/net/mac80211.h rate_control_send_low
 </chapter>
@@ -23,7 +23,7 @@ BA session stop & deauth/disassoc frames
 end note
 end

-mac80211->driver: config(channel, non-HT)
+mac80211->driver: config(channel, channel type)
 mac80211->driver: bss_info_changed(set BSSID, basic rate bitmap)
 mac80211->driver: sta_state(AP, exists)

@@ -51,7 +51,7 @@ note over mac80211,driver: cleanup like for authenticate
 end

 alt not previously authenticated (FT)
-mac80211->driver: config(channel, non-HT)
+mac80211->driver: config(channel, channel type)
 mac80211->driver: bss_info_changed(set BSSID, basic rate bitmap)
 mac80211->driver: sta_state(AP, exists)
 mac80211->driver: sta_state(AP, authenticated)
@@ -67,10 +67,6 @@ end

 mac80211->driver: set up QoS parameters

-alt is HT channel
-mac80211->driver: config(channel, HT params)
-end
-
 mac80211->driver: bss_info_changed(QoS, HT, associated with AID)
 mac80211->userspace: associated

@@ -95,5 +91,5 @@ mac80211->driver: sta_state(AP,exists)
 mac80211->driver: sta_state(AP,not-exists)
 mac80211->driver: turn off powersave
 mac80211->driver: bss_info_changed(clear BSSID, not associated, no QoS, ...)
-mac80211->driver: config(non-HT channel type)
+mac80211->driver: config(channel type to non-HT)
 mac80211->userspace: disconnected
@@ -1522,8 +1522,8 @@ M: Gustavo Padovan <gustavo@padovan.org>
 M: Johan Hedberg <johan.hedberg@gmail.com>
 L: linux-bluetooth@vger.kernel.org
 W: http://www.bluez.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jh/bluetooth.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
 S: Maintained
 F: drivers/bluetooth/

@@ -1533,8 +1533,8 @@ M: Gustavo Padovan <gustavo@padovan.org>
 M: Johan Hedberg <johan.hedberg@gmail.com>
 L: linux-bluetooth@vger.kernel.org
 W: http://www.bluez.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jh/bluetooth.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
 S: Maintained
 F: net/bluetooth/
 F: include/net/bluetooth/
@@ -29,7 +29,7 @@ config BCMA_HOST_PCI

 config BCMA_DRIVER_PCI_HOSTMODE
 	bool "Driver for PCI core working in hostmode"
-	depends on BCMA && MIPS
+	depends on BCMA && MIPS && BCMA_HOST_PCI
 	help
 	  PCI core hostmode operation (external PCI bus).

@@ -10,6 +10,7 @@
  */

 #include "bcma_private.h"
+#include <linux/pci.h>
 #include <linux/export.h>
 #include <linux/bcma/bcma.h>
 #include <asm/paccess.h>
@@ -72,7 +72,9 @@ static struct usb_device_id ath3k_table[] = {

 	/* Atheros AR3012 with sflash firmware*/
 	{ USB_DEVICE(0x0CF3, 0x3004) },
+	{ USB_DEVICE(0x0CF3, 0x311D) },
 	{ USB_DEVICE(0x13d3, 0x3375) },
+	{ USB_DEVICE(0x04CA, 0x3005) },

 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE02C) },
@@ -89,7 +91,9 @@ static struct usb_device_id ath3k_blist_tbl[] = {

 	/* Atheros AR3012 with sflash firmware*/
 	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },

 	{ }	/* Terminating entry */
 };
@@ -61,7 +61,7 @@ static struct usb_device_id btusb_table[] = {
 	{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },

 	/* Broadcom SoftSailing reporting vendor specific */
-	{ USB_DEVICE(0x05ac, 0x21e1) },
+	{ USB_DEVICE(0x0a5c, 0x21e1) },

 	/* Apple MacBookPro 7,1 */
 	{ USB_DEVICE(0x05ac, 0x8213) },
@@ -103,6 +103,7 @@ static struct usb_device_id btusb_table[] = {
 	/* Broadcom BCM20702A0 */
 	{ USB_DEVICE(0x0a5c, 0x21e3) },
 	{ USB_DEVICE(0x0a5c, 0x21e6) },
+	{ USB_DEVICE(0x0a5c, 0x21e8) },
 	{ USB_DEVICE(0x0a5c, 0x21f3) },
 	{ USB_DEVICE(0x413c, 0x8197) },

@@ -129,7 +130,9 @@ static struct usb_device_id blacklist_table[] = {

 	/* Atheros 3012 with sflash firmware */
 	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },

 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
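The hunks above only extend USB device-ID match tables; `.driver_info` carries per-device quirk bits that the probe callback reads back. As a hedged illustration (a generic sketch, not the btusb/ath3k code itself; the flag value and names are made up), this is how such a table is typically declared, exported for module autoloading, and consumed:

	#include <linux/module.h>
	#include <linux/usb.h>

	/* illustrative quirk flag; the real BTUSB_ATH3012 is defined in btusb.c */
	#define DEMO_QUIRK_ATH3012	0x80

	static const struct usb_device_id demo_table[] = {
		/* match on vendor/product ID; driver_info carries quirk bits */
		{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = DEMO_QUIRK_ATH3012 },
		{ }	/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, demo_table);

	static int demo_probe(struct usb_interface *intf,
			      const struct usb_device_id *id)
	{
		if (id->driver_info & DEMO_QUIRK_ATH3012)
			dev_info(&intf->dev, "AR3012-class device detected\n");
		return 0;
	}

	static void demo_disconnect(struct usb_interface *intf)
	{
	}

	static struct usb_driver demo_driver = {
		.name		= "demo_usb_ids",
		.probe		= demo_probe,
		.disconnect	= demo_disconnect,
		.id_table	= demo_table,
	};
	module_usb_driver(demo_driver);
	MODULE_LICENSE("GPL");

Adding a `{ USB_DEVICE(...) }` entry is therefore enough for the core to bind the driver to the new hardware; no other code changes are needed unless the device needs special handling keyed off driver_info.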
@@ -299,11 +299,11 @@ static void hci_uart_tty_close(struct tty_struct *tty)
 		hci_uart_close(hdev);

 	if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
-		hu->proto->close(hu);
 		if (hdev) {
 			hci_unregister_dev(hdev);
 			hci_free_dev(hdev);
 		}
+		hu->proto->close(hu);
 	}

 	kfree(hu);
@@ -14,6 +14,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
@@ -728,32 +730,24 @@ void
 ath5k_ani_print_counters(struct ath5k_hw *ah)
 {
 	/* clears too */
-	printk(KERN_NOTICE "ACK fail\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
-	printk(KERN_NOTICE "RTS fail\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
-	printk(KERN_NOTICE "RTS success\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_RTS_OK));
-	printk(KERN_NOTICE "FCS error\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
+	pr_notice("ACK fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
+	pr_notice("RTS fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
+	pr_notice("RTS success\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_OK));
+	pr_notice("FCS error\t%d\n", ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));

 	/* no clear */
-	printk(KERN_NOTICE "tx\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
-	printk(KERN_NOTICE "rx\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
-	printk(KERN_NOTICE "busy\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
-	printk(KERN_NOTICE "cycles\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
+	pr_notice("tx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
+	pr_notice("rx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
+	pr_notice("busy\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
+	pr_notice("cycles\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));

-	printk(KERN_NOTICE "AR5K_PHYERR_CNT1\t%d\n",
+	pr_notice("AR5K_PHYERR_CNT1\t%d\n",
 		  ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
-	printk(KERN_NOTICE "AR5K_PHYERR_CNT2\t%d\n",
+	pr_notice("AR5K_PHYERR_CNT2\t%d\n",
 		  ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
-	printk(KERN_NOTICE "AR5K_OFDM_FIL_CNT\t%d\n",
+	pr_notice("AR5K_OFDM_FIL_CNT\t%d\n",
 		  ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
-	printk(KERN_NOTICE "AR5K_CCK_FIL_CNT\t%d\n",
+	pr_notice("AR5K_CCK_FIL_CNT\t%d\n",
 		  ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
 }

@@ -77,17 +77,20 @@
 \****************************/

 #define ATH5K_PRINTF(fmt, ...) \
-	printk(KERN_WARNING "%s: " fmt, __func__, ##__VA_ARGS__)
+	pr_warn("%s: " fmt, __func__, ##__VA_ARGS__)
+
+void __printf(3, 4)
+_ath5k_printk(const struct ath5k_hw *ah, const char *level,
+	      const char *fmt, ...);

 #define ATH5K_PRINTK(_sc, _level, _fmt, ...) \
-	printk(_level "ath5k %s: " _fmt, \
-		((_sc) && (_sc)->hw) ? wiphy_name((_sc)->hw->wiphy) : "", \
-		##__VA_ARGS__)
+	_ath5k_printk(_sc, _level, _fmt, ##__VA_ARGS__)

-#define ATH5K_PRINTK_LIMIT(_sc, _level, _fmt, ...) do { \
+#define ATH5K_PRINTK_LIMIT(_sc, _level, _fmt, ...) \
+	do { \
 		if (net_ratelimit()) \
 			ATH5K_PRINTK(_sc, _level, _fmt, ##__VA_ARGS__); \
 	} while (0)

 #define ATH5K_INFO(_sc, _fmt, ...) \
 	ATH5K_PRINTK(_sc, KERN_INFO, _fmt, ##__VA_ARGS__)
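A recurring change in this series is the addition of `#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt` ahead of the first include, with raw printk(KERN_*) calls rewritten to pr_notice()/pr_info()/pr_warn(). As a hedged sketch (a stand-alone toy module, not ath5k code), this shows why the define must appear before printk.h is pulled in and what the resulting log prefix looks like:

	/* pr_fmt() must be defined before <linux/printk.h> is included
	 * (usually indirectly via <linux/module.h>), otherwise the
	 * default empty prefix is compiled into every pr_*() call.
	 */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/module.h>
	#include <linux/printk.h>

	static int __init demo_init(void)
	{
		/* with KBUILD_MODNAME "demo" this logs: "demo: loaded, value=42" */
		pr_info("loaded, value=%d\n", 42);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		pr_notice("unloading\n");
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

The per-call message text can then drop the hand-written "ath5k ..." prefixes, which is exactly what the debugfs hunks later in this diff do.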
@@ -20,6 +20,8 @@
 * Attach/Detach Functions and helpers *
 \*************************************/

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include "ath5k.h"
@@ -40,6 +40,8 @@
 *
 */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -3038,3 +3040,23 @@ ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
 	ath5k_hw_set_rx_filter(ah, rfilt);
 	ah->filter_flags = rfilt;
 }
+
+void _ath5k_printk(const struct ath5k_hw *ah, const char *level,
+		   const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	if (ah && ah->hw)
+		printk("%s" pr_fmt("%s: %pV"),
+		       level, wiphy_name(ah->hw->wiphy), &vaf);
+	else
+		printk("%s" pr_fmt("%pV"), level, &vaf);
+
+	va_end(args);
+}
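The new _ath5k_printk() helper above relies on the kernel's struct va_format / "%pV" mechanism: the wrapper forwards its variable argument list to a single printk() while prepending the wiphy name, without formatting into an intermediate buffer. A minimal stand-alone sketch of the same pattern (hypothetical demo_printk(), not part of ath5k):

	#include <linux/kernel.h>
	#include <linux/module.h>

	/* Forward a printf-style call to printk once, prefixing a name.
	 * struct va_format + %pV lets printk expand the caller's format
	 * and arguments in place.
	 */
	static __printf(2, 3) void demo_printk(const char *name, const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_INFO "%s: %pV", name, &vaf);
		va_end(args);
	}

	static int __init demo_init(void)
	{
		demo_printk("wlan0", "calibration took %d ms\n", 12);
		return 0;
	}
	module_init(demo_init);
	MODULE_LICENSE("GPL");

The __printf(2, 3) annotation keeps gcc's format checking on the wrapper, which is why the declaration added to ath5k.h carries __printf(3, 4).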
@@ -57,6 +57,9 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGES.
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/export.h>
 #include <linux/moduleparam.h>

@@ -247,10 +250,10 @@ static ssize_t write_file_beacon(struct file *file,

 	if (strncmp(buf, "disable", 7) == 0) {
 		AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
-		printk(KERN_INFO "debugfs disable beacons\n");
+		pr_info("debugfs disable beacons\n");
 	} else if (strncmp(buf, "enable", 6) == 0) {
 		AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
-		printk(KERN_INFO "debugfs enable beacons\n");
+		pr_info("debugfs enable beacons\n");
 	}
 	return count;
 }
@@ -450,19 +453,19 @@ static ssize_t write_file_antenna(struct file *file,

 	if (strncmp(buf, "diversity", 9) == 0) {
 		ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
-		printk(KERN_INFO "ath5k debug: enable diversity\n");
+		pr_info("debug: enable diversity\n");
 	} else if (strncmp(buf, "fixed-a", 7) == 0) {
 		ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
-		printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
+		pr_info("debug: fixed antenna A\n");
 	} else if (strncmp(buf, "fixed-b", 7) == 0) {
 		ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
-		printk(KERN_INFO "ath5k debug: fixed antenna B\n");
+		pr_info("debug: fixed antenna B\n");
 	} else if (strncmp(buf, "clear", 5) == 0) {
 		for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
 			ah->stats.antenna_rx[i] = 0;
 			ah->stats.antenna_tx[i] = 0;
 		}
-		printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
+		pr_info("debug: cleared antenna stats\n");
 	}
 	return count;
 }
@@ -632,7 +635,7 @@ static ssize_t write_file_frameerrors(struct file *file,
 		st->txerr_fifo = 0;
 		st->txerr_filt = 0;
 		st->tx_all_count = 0;
-		printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n");
+		pr_info("debug: cleared frameerrors stats\n");
 	}
 	return count;
 }
@@ -21,6 +21,8 @@
  Hardware Descriptor Functions
 \******************************/

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
@@ -441,10 +443,8 @@ ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
 			      struct ath5k_desc *desc,
 			      struct ath5k_tx_status *ts)
 {
-	struct ath5k_hw_2w_tx_ctl *tx_ctl;
 	struct ath5k_hw_tx_status *tx_status;

-	tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
 	tx_status = &desc->ud.ds_tx5210.tx_stat;

 	/* No frame has been send or error */
@@ -495,11 +495,9 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
 			      struct ath5k_desc *desc,
 			      struct ath5k_tx_status *ts)
 {
-	struct ath5k_hw_4w_tx_ctl *tx_ctl;
 	struct ath5k_hw_tx_status *tx_status;
 	u32 txstat0, txstat1;

-	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
 	tx_status = &desc->ud.ds_tx5212.tx_stat;

 	txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
@@ -29,6 +29,8 @@
  * status registers (ISR).
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
@@ -21,6 +21,8 @@
 * EEPROM access functions and helpers *
 \*************************************/

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/slab.h>

 #include "ath5k.h"
@@ -19,6 +19,8 @@
 *
 */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
@@ -1574,8 +1576,7 @@ ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)

 	/* AR5K_MODE_11B */
 	if (mode > 2) {
-		ATH5K_ERR(ah,
-			"unsupported channel mode: %d\n", mode);
+		ATH5K_ERR(ah, "unsupported channel mode: %d\n", mode);
 		return -EINVAL;
 	}

@@ -39,6 +39,8 @@
 *
 */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include "ath5k.h"

@@ -41,6 +41,8 @@
 *
 */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <net/mac80211.h>
 #include <asm/unaligned.h>

@@ -14,6 +14,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/nl80211.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
@@ -347,7 +349,7 @@ init_ath5k_pci(void)

 	ret = pci_register_driver(&ath5k_pci_driver);
 	if (ret) {
-		printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
+		pr_err("pci: can't register pci driver\n");
 		return ret;
 	}

@@ -22,6 +22,8 @@
 * PHY related functions *
 \***********************/

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
@@ -20,6 +20,8 @@
 Queue Control Unit, DCF Control Unit Functions
 \********************************************/

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
@@ -23,6 +23,8 @@
 Reset function and helpers
 \****************************/

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <asm/unaligned.h>

 #include <linux/pci.h>		/* To determine if a card is pci-e */
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/device.h>
 #include <linux/pci.h>

@@ -15,6 +15,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/moduleparam.h>
 #include <linux/inetdevice.h>
 #include <linux/export.h>
@@ -16,6 +16,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/moduleparam.h>
 #include <linux/errno.h>
 #include <linux/export.h>
@@ -15,6 +15,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "core.h"
 #include "hif-ops.h"
 #include "cfg80211.h"
@@ -15,6 +15,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "core.h"
 #include "debug.h"

@@ -11,7 +11,10 @@ ath9k-$(CONFIG_ATH9K_PCI) += pci.o
 ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
 ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
 ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
-ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
+ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
+		dfs.o \
+		dfs_pattern_detector.o \
+		dfs_pri_detector.o

 obj-$(CONFIG_ATH9K) += ath9k.o

@@ -46,8 +46,8 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = {
 	{ 5, 4, 1 }, /* lvl 5 */
 	{ 6, 5, 1 }, /* lvl 6 */
 	{ 7, 6, 1 }, /* lvl 7 */
-	{ 7, 7, 1 }, /* lvl 8 */
-	{ 7, 8, 0 }  /* lvl 9 */
+	{ 7, 6, 0 }, /* lvl 8 */
+	{ 7, 7, 0 }  /* lvl 9 */
 };
 #define ATH9K_ANI_OFDM_NUM_LEVEL \
 	ARRAY_SIZE(ofdm_level_table)
@@ -91,8 +91,8 @@ static const struct ani_cck_level_entry cck_level_table[] = {
 	{ 4, 0 }, /* lvl 4 */
 	{ 5, 0 }, /* lvl 5 */
 	{ 6, 0 }, /* lvl 6 */
-	{ 7, 0 }, /* lvl 7 (only for high rssi) */
-	{ 8, 0 }  /* lvl 8 (only for high rssi) */
+	{ 6, 0 }, /* lvl 7 (only for high rssi) */
+	{ 7, 0 }  /* lvl 8 (only for high rssi) */
 };

 #define ATH9K_ANI_CCK_NUM_LEVEL \
@@ -290,16 +290,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
 				     ATH9K_ANI_FIRSTEP_LEVEL,
 				     entry_ofdm->fir_step_level);

-	if ((ah->opmode != NL80211_IFTYPE_STATION &&
-	     ah->opmode != NL80211_IFTYPE_ADHOC) ||
-	    aniState->noiseFloor <= aniState->rssiThrHigh) {
-		if (aniState->ofdmWeakSigDetectOff)
-			/* force on ofdm weak sig detect */
-			ath9k_hw_ani_control(ah,
-				ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
-				true);
-		else if (aniState->ofdmWeakSigDetectOff ==
-			 entry_ofdm->ofdm_weak_signal_on)
+	if ((aniState->noiseFloor >= aniState->rssiThrHigh) &&
+	    (!aniState->ofdmWeakSigDetectOff !=
+	     entry_ofdm->ofdm_weak_signal_on)) {
 		ath9k_hw_ani_control(ah,
 				     ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
 				     entry_ofdm->ofdm_weak_signal_on);
@@ -717,26 +710,30 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
 		ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
 		cckPhyErrRate, aniState->ofdmsTurn);

-	if (aniState->listenTime > 5 * ah->aniperiod) {
-		if (ofdmPhyErrRate <= ah->config.ofdm_trig_low &&
-		    cckPhyErrRate <= ah->config.cck_trig_low) {
+	if (aniState->listenTime > ah->aniperiod) {
+		if (cckPhyErrRate < ah->config.cck_trig_low &&
+		    ((ofdmPhyErrRate < ah->config.ofdm_trig_low &&
+		      aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_DEF_LEVEL) ||
+		     (ofdmPhyErrRate < ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI &&
+		      aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL))) {
 			ath9k_hw_ani_lower_immunity(ah);
 			aniState->ofdmsTurn = !aniState->ofdmsTurn;
-		}
-		ath9k_ani_restart(ah);
-	} else if (aniState->listenTime > ah->aniperiod) {
-		/* check to see if need to raise immunity */
-		if (ofdmPhyErrRate > ah->config.ofdm_trig_high &&
-		    (cckPhyErrRate <= ah->config.cck_trig_high ||
-		     aniState->ofdmsTurn)) {
+		} else if ((ofdmPhyErrRate > ah->config.ofdm_trig_high &&
+			    aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) ||
+			   (ofdmPhyErrRate > ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI &&
+			    aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_DEF_LEVEL)) {
 			ath9k_hw_ani_ofdm_err_trigger(ah);
-			ath9k_ani_restart(ah);
 			aniState->ofdmsTurn = false;
 		} else if (cckPhyErrRate > ah->config.cck_trig_high) {
 			ath9k_hw_ani_cck_err_trigger(ah);
-			ath9k_ani_restart(ah);
 			aniState->ofdmsTurn = true;
 		}
+		ath9k_ani_restart(ah);
 	}
 }
 EXPORT_SYMBOL(ath9k_hw_ani_monitor);
@@ -25,11 +25,13 @@

 /* units are errors per second */
 #define ATH9K_ANI_OFDM_TRIG_HIGH_OLD       500
-#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW       1000
+#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW       3500
+#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000

 /* units are errors per second */
 #define ATH9K_ANI_OFDM_TRIG_LOW_OLD        200
 #define ATH9K_ANI_OFDM_TRIG_LOW_NEW        400
+#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI  900

 /* units are errors per second */
 #define ATH9K_ANI_CCK_TRIG_HIGH_OLD        200
@@ -53,7 +55,7 @@
 #define ATH9K_ANI_RSSI_THR_LOW             7

 #define ATH9K_ANI_PERIOD_OLD               100
-#define ATH9K_ANI_PERIOD_NEW               1000
+#define ATH9K_ANI_PERIOD_NEW               300

 /* in ms */
 #define ATH9K_ANI_POLLINTERVAL_OLD         100
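The new *_BELOW_INI / *_ABOVE_INI constants pair with ATH9K_ANI_OFDM_DEF_LEVEL in the ath9k_hw_ani_monitor() hunk above: which raise/lower trigger applies depends on whether the current OFDM immunity level sits below or at/above the INI default. A hedged, stand-alone sketch of just that selection (it ignores the CCK term for brevity; the DEF_LEVEL value and helper name are assumptions, not taken from this diff):

	/* Mirrors the branch conditions of the ath9k_hw_ani_monitor() hunk,
	 * using the constants introduced in ani.h above.
	 */
	#define OFDM_TRIG_HIGH		3500	/* ATH9K_ANI_OFDM_TRIG_HIGH_NEW */
	#define OFDM_TRIG_HIGH_BELOW_INI 1000
	#define OFDM_TRIG_LOW		400	/* ATH9K_ANI_OFDM_TRIG_LOW_NEW */
	#define OFDM_TRIG_LOW_ABOVE_INI	900
	#define OFDM_DEF_LEVEL		3	/* assumed INI default level */

	/* returns +1 to raise immunity, -1 to lower it, 0 to leave it alone */
	static int ofdm_ani_decision(unsigned int err_rate, int immunity_level)
	{
		int below_ini = immunity_level < OFDM_DEF_LEVEL;

		if (err_rate > (below_ini ? OFDM_TRIG_HIGH_BELOW_INI :
					    OFDM_TRIG_HIGH))
			return 1;
		if (err_rate < (below_ini ? OFDM_TRIG_LOW :
					    OFDM_TRIG_LOW_ABOVE_INI))
			return -1;
		return 0;
	}

So a card still running at its INI defaults reacts to a fairly low error rate (1000 errors/s) before raising immunity, while one already above the default needs the much higher 3500 errors/s rate; the lower thresholds work symmetrically.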
@@ -1047,46 +1047,8 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
 		break;
 	}
 	case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
-		static const int m1ThreshLow[] = { 127, 50 };
-		static const int m2ThreshLow[] = { 127, 40 };
-		static const int m1Thresh[] = { 127, 0x4d };
-		static const int m2Thresh[] = { 127, 0x40 };
-		static const int m2CountThr[] = { 31, 16 };
-		static const int m2CountThrLow[] = { 63, 48 };
 		u32 on = param ? 1 : 0;

-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
-			      AR_PHY_SFCORR_LOW_M1_THRESH_LOW, m1ThreshLow[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
-			      AR_PHY_SFCORR_LOW_M2_THRESH_LOW, m2ThreshLow[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
-			      AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, m2CountThrLow[on]);
-
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]);
-
 		if (on)
 			REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
 				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
@@ -777,11 +777,11 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
 	{0x0000a074, 0x00000000},
 	{0x0000a078, 0x00000000},
 	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x22222229},
-	{0x0000a084, 0x1d1d1d1d},
-	{0x0000a088, 0x1d1d1d1d},
-	{0x0000a08c, 0x1d1d1d1d},
-	{0x0000a090, 0x171d1d1d},
+	{0x0000a080, 0x1a1a1a1a},
+	{0x0000a084, 0x1a1a1a1a},
+	{0x0000a088, 0x1a1a1a1a},
+	{0x0000a08c, 0x1a1a1a1a},
+	{0x0000a090, 0x171a1a1a},
 	{0x0000a094, 0x11111717},
 	{0x0000a098, 0x00030311},
 	{0x0000a09c, 0x00000000},
@@ -823,55 +823,6 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
 		 * on == 0 means more noise imm
 		 */
 		u32 on = param ? 1 : 0;
-		/*
-		 * make register setting for default
-		 * (weak sig detect ON) come from INI file
-		 */
-		int m1ThreshLow = on ?
-			aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
-		int m2ThreshLow = on ?
-			aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
-		int m1Thresh = on ?
-			aniState->iniDef.m1Thresh : m1Thresh_off;
-		int m2Thresh = on ?
-			aniState->iniDef.m2Thresh : m2Thresh_off;
-		int m2CountThr = on ?
-			aniState->iniDef.m2CountThr : m2CountThr_off;
-		int m2CountThrLow = on ?
-			aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
-		int m1ThreshLowExt = on ?
-			aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
-		int m2ThreshLowExt = on ?
-			aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
-		int m1ThreshExt = on ?
-			aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
-		int m2ThreshExt = on ?
-			aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
-
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
-			      AR_PHY_SFCORR_LOW_M1_THRESH_LOW, m1ThreshLow);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
-			      AR_PHY_SFCORR_LOW_M2_THRESH_LOW, m2ThreshLow);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M1_THRESH, m1Thresh);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M2_THRESH, m2Thresh);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
-			      AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, m2CountThrLow);
-
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
-
 		if (on)
 			REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
@@ -26,6 +26,7 @@
 #include "debug.h"
 #include "common.h"
 #include "mci.h"
+#include "dfs.h"

 /*
  * Header for the ath9k.ko driver core *only* -- hw code nor any other driver
@@ -430,6 +431,8 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
 void ath_reset_work(struct work_struct *work);
 void ath_hw_check(struct work_struct *work);
 void ath_hw_pll_work(struct work_struct *work);
+void ath_rx_poll(unsigned long data);
+void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon);
 void ath_paprd_calibrate(struct work_struct *work);
 void ath_ani_calibrate(unsigned long data);
 void ath_start_ani(struct ath_common *common);
@@ -670,6 +673,7 @@ struct ath_softc {
 	struct ath_beacon_config cur_beacon_conf;
 	struct delayed_work tx_complete_work;
 	struct delayed_work hw_pll_work;
+	struct timer_list rx_poll_timer;

 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 	struct ath_btcoex btcoex;
@@ -680,6 +684,7 @@ struct ath_softc {

 	struct ath_ant_comb ant_comb;
 	u8 ant_tx, ant_rx;
+	struct dfs_pattern_detector *dfs_detector;
 };

 void ath9k_tasklet(unsigned long data);
@@ -524,6 +524,7 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
 	PR("hw-put-tx-buf: ", puttxbuf);
 	PR("hw-tx-start: ", txstart);
 	PR("hw-tx-proc-desc: ", txprocdesc);
+	PR("TX-Failed: ", txfailed);
 	len += snprintf(buf + len, size - len,
 			"%s%11p%11p%10p%10p\n", "txq-memory-address:",
 			sc->tx.txq_map[WME_AC_BE],
@@ -910,6 +911,21 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
 	len += snprintf(buf + len, size - len,
 			"%22s : %10u\n", "DECRYPT BUSY ERR",
 			sc->debug.stats.rxstats.decrypt_busy_err);
+	len += snprintf(buf + len, size - len,
+			"%22s : %10u\n", "RX-LENGTH-ERR",
+			sc->debug.stats.rxstats.rx_len_err);
+	len += snprintf(buf + len, size - len,
+			"%22s : %10u\n", "RX-OOM-ERR",
+			sc->debug.stats.rxstats.rx_oom_err);
+	len += snprintf(buf + len, size - len,
+			"%22s : %10u\n", "RX-RATE-ERR",
+			sc->debug.stats.rxstats.rx_rate_err);
+	len += snprintf(buf + len, size - len,
+			"%22s : %10u\n", "RX-DROP-RXFLUSH",
+			sc->debug.stats.rxstats.rx_drop_rxflush);
+	len += snprintf(buf + len, size - len,
+			"%22s : %10u\n", "RX-TOO-MANY-FRAGS",
+			sc->debug.stats.rxstats.rx_too_many_frags_err);

 	PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
 	PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
@@ -944,6 +960,12 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
 	len += snprintf(buf + len, size - len,
 			"%22s : %10u\n", "RX-Bytes-All",
 			sc->debug.stats.rxstats.rx_bytes_all);
+	len += snprintf(buf + len, size - len,
+			"%22s : %10u\n", "RX-Beacons",
+			sc->debug.stats.rxstats.rx_beacons);
+	len += snprintf(buf + len, size - len,
+			"%22s : %10u\n", "RX-Frags",
+			sc->debug.stats.rxstats.rx_frags);

 	if (len > size)
 		len = size;
@@ -958,7 +980,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,

 void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
 {
-#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
 #define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
 #define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\
 			[sc->debug.rsidx].c)
@@ -1004,7 +1025,6 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)

 #endif

-#undef RX_STAT_INC
 #undef RX_PHY_ERR_INC
 #undef RX_SAMP_DBG
 }
@@ -113,6 +113,7 @@ struct ath_interrupt_stats {
  * @puttxbuf: Number of times hardware was given txbuf to write.
  * @txstart:  Number of times hardware was told to start tx.
  * @txprocdesc: Number of times tx descriptor was processed
+ * @txfailed: Out-of-memory or other errors in xmit path.
  */
 struct ath_tx_stats {
 	u32 tx_pkts_all;
@@ -135,8 +136,11 @@ struct ath_tx_stats {
 	u32 puttxbuf;
 	u32 txstart;
 	u32 txprocdesc;
+	u32 txfailed;
 };

+#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
+
 /**
  * struct ath_rx_stats - RX Statistics
  * @rx_pkts_all:  No. of total frames received, including ones that
@@ -153,6 +157,13 @@ struct ath_tx_stats {
  * @post_delim_crc_err: Post-Frame delimiter CRC error detections
  * @decrypt_busy_err: Decryption interruptions counter
  * @phy_err_stats: Individual PHY error statistics
+ * @rx_len_err: No. of frames discarded due to bad length.
+ * @rx_oom_err: No. of frames dropped due to OOM issues.
+ * @rx_rate_err: No. of frames dropped due to rate errors.
+ * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
+ * @rx_drop_rxflush: No. of frames dropped due to RX-FLUSH.
+ * @rx_beacons: No. of beacons received.
+ * @rx_frags: No. of rx-fragements received.
  */
 struct ath_rx_stats {
 	u32 rx_pkts_all;
@@ -165,6 +176,13 @@ struct ath_rx_stats {
 	u32 post_delim_crc_err;
 	u32 decrypt_busy_err;
 	u32 phy_err_stats[ATH9K_PHYERR_MAX];
+	u32 rx_len_err;
+	u32 rx_oom_err;
+	u32 rx_rate_err;
+	u32 rx_too_many_frags_err;
+	u32 rx_drop_rxflush;
+	u32 rx_beacons;
+	u32 rx_frags;
 };

 enum ath_reset_type {
@@ -174,6 +192,7 @@ enum ath_reset_type {
 	RESET_TYPE_TX_ERROR,
 	RESET_TYPE_TX_HANG,
 	RESET_TYPE_PLL_HANG,
+	RESET_TYPE_MAC_HANG,
 	__RESET_TYPE_MAX
 };

@@ -247,6 +266,8 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);

 #else

+#define RX_STAT_INC(c) /* NOP */
+
 static inline int ath9k_init_debug(struct ath_hw *ah)
 {
 	return 0;
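RX_STAT_INC() moves from the C file into debug.h with a no-op fallback when debugfs support is compiled out, so RX-path callers can bump counters unconditionally and the accounting disappears entirely in non-debug builds. This is the usual "compile-away stats" idiom; a hedged, generic sketch (names and config symbol are illustrative, not ath9k's):

	#ifdef CONFIG_DEMO_DEBUGFS
	struct demo_rx_stats {
		unsigned int rx_oom_err;
		unsigned int rx_len_err;
	};
	/* real counter increment when debugfs stats are built in */
	#define DEMO_RX_STAT_INC(sc, c)	((sc)->stats.c++)
	#else
	/* compiles to nothing, callers need no #ifdefs */
	#define DEMO_RX_STAT_INC(sc, c)	do { } while (0)
	#endif

Passing the owning structure explicitly (rather than relying on a variable named sc being in scope, as the ath9k macro does) makes the macro usable from any function, at the cost of slightly noisier call sites.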
@@ -21,17 +21,6 @@
 #include "dfs.h"
 #include "dfs_debug.h"

-/*
- * TODO: move into or synchronize this with generic header
- * as soon as IF is defined
- */
-struct dfs_radar_pulse {
-	u16 freq;
-	u64 ts;
-	u32 width;
-	u8 rssi;
-};
-
 /* internal struct to pass radar data */
 struct ath_radar_data {
 	u8 pulse_bw_info;
@@ -60,44 +49,44 @@ static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
 #define EXT_CH_RADAR_FOUND 0x02
 static bool
 ath9k_postprocess_radar_event(struct ath_softc *sc,
-			      struct ath_radar_data *are,
-			      struct dfs_radar_pulse *drp)
+			      struct ath_radar_data *ard,
+			      struct pulse_event *pe)
 {
 	u8 rssi;
 	u16 dur;

 	ath_dbg(ath9k_hw_common(sc->sc_ah), DFS,
 		"pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
-		are->pulse_bw_info,
-		are->pulse_length_pri, are->rssi,
-		are->pulse_length_ext, are->ext_rssi);
+		ard->pulse_bw_info,
+		ard->pulse_length_pri, ard->rssi,
+		ard->pulse_length_ext, ard->ext_rssi);

 	/*
 	 * Only the last 2 bits of the BW info are relevant, they indicate
 	 * which channel the radar was detected in.
 	 */
-	are->pulse_bw_info &= 0x03;
+	ard->pulse_bw_info &= 0x03;

-	switch (are->pulse_bw_info) {
+	switch (ard->pulse_bw_info) {
 	case PRI_CH_RADAR_FOUND:
 		/* radar in ctrl channel */
-		dur = are->pulse_length_pri;
+		dur = ard->pulse_length_pri;
 		DFS_STAT_INC(sc, pri_phy_errors);
 		/*
 		 * cannot use ctrl channel RSSI
 		 * if extension channel is stronger
 		 */
-		rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi;
+		rssi = (ard->ext_rssi >= (ard->rssi + 3)) ? 0 : ard->rssi;
 		break;
 	case EXT_CH_RADAR_FOUND:
 		/* radar in extension channel */
-		dur = are->pulse_length_ext;
+		dur = ard->pulse_length_ext;
 		DFS_STAT_INC(sc, ext_phy_errors);
 		/*
 		 * cannot use extension channel RSSI
 		 * if control channel is stronger
 		 */
-		rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi;
+		rssi = (ard->rssi >= (ard->ext_rssi + 12)) ? 0 : ard->ext_rssi;
 		break;
 	case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
 		/*
@@ -107,14 +96,14 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
 		 * Radiated testing, when pulse is on DC, different pri and
 		 * ext durations are reported, so take the larger of the two
 		 */
-		if (are->pulse_length_ext >= are->pulse_length_pri)
-			dur = are->pulse_length_ext;
+		if (ard->pulse_length_ext >= ard->pulse_length_pri)
+			dur = ard->pulse_length_ext;
 		else
-			dur = are->pulse_length_pri;
+			dur = ard->pulse_length_pri;
 		DFS_STAT_INC(sc, dc_phy_errors);

 		/* when both are present use stronger one */
-		rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi;
+		rssi = (ard->rssi < ard->ext_rssi) ? ard->ext_rssi : ard->rssi;
 		break;
 	default:
 		/*
@@ -137,8 +126,8 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
 	 */

 	/* convert duration to usecs */
-	drp->width = dur_to_usecs(sc->sc_ah, dur);
-	drp->rssi = rssi;
+	pe->width = dur_to_usecs(sc->sc_ah, dur);
+	pe->rssi = rssi;

 	DFS_STAT_INC(sc, pulses_detected);
 	return true;
@@ -155,12 +144,12 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
 	struct ath_radar_data ard;
 	u16 datalen;
 	char *vdata_end;
-	struct dfs_radar_pulse drp;
+	struct pulse_event pe;
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);

-	if ((!(rs->rs_phyerr != ATH9K_PHYERR_RADAR)) &&
-	    (!(rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT))) {
+	if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
+	    (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
 		ath_dbg(common, DFS,
 			"Error: rs_phyer=0x%x not a radar error\n",
 			rs->rs_phyerr);
@@ -189,27 +178,20 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
 	ard.pulse_bw_info = vdata_end[-1];
 	ard.pulse_length_ext = vdata_end[-2];
 	ard.pulse_length_pri = vdata_end[-3];
-
-	ath_dbg(common, DFS,
-		"bw_info=%d, length_pri=%d, length_ext=%d, "
-		"rssi_pri=%d, rssi_ext=%d\n",
-		ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext,
-		ard.rssi, ard.ext_rssi);
-
-	drp.freq = ah->curchan->channel;
-	drp.ts = mactime;
-	if (ath9k_postprocess_radar_event(sc, &ard, &drp)) {
+	pe.freq = ah->curchan->channel;
+	pe.ts = mactime;
+	if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
+		struct dfs_pattern_detector *pd = sc->dfs_detector;
 		static u64 last_ts;
 		ath_dbg(common, DFS,
 			"ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
 			"width=%d, rssi=%d, delta_ts=%llu\n",
-			drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts);
-		last_ts = drp.ts;
-		/*
-		 * TODO: forward pulse to pattern detector
-		 *
-		 * ieee80211_add_radar_pulse(drp.freq, drp.ts,
-		 *                           drp.width, drp.rssi);
-		 */
+			pe.freq, pe.ts, pe.width, pe.rssi, pe.ts-last_ts);
+		last_ts = pe.ts;
+		if (pd != NULL && pd->add_pulse(pd, &pe)) {
+			/*
+			 * TODO: forward radar event to DFS management layer
+			 */
+		}
 	}
 }

@@ -17,6 +17,7 @@

 #ifndef ATH9K_DFS_H
 #define ATH9K_DFS_H
+#include "dfs_pattern_detector.h"

 #if defined(CONFIG_ATH9K_DFS_CERTIFIED)
 /**
@@ -31,12 +32,13 @@
  *
  * The radar information provided as raw payload data is validated and
  * filtered for false pulses. Events passing all tests are forwarded to
- * the upper layer for pattern detection.
+ * the DFS detector for pattern detection.
  */
 void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
 			      struct ath_rx_status *rs, u64 mactime);
 #else
-static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
-			      struct ath_rx_status *rs, u64 mactime) { }
+static inline void
+ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+			 struct ath_rx_status *rs, u64 mactime) { }
 #endif

new file: drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c (300 lines, shown in part)
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2012 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include "dfs_pattern_detector.h"
+#include "dfs_pri_detector.h"
+
+/*
+ * tolerated deviation of radar time stamp in usecs on both sides
+ * TODO: this might need to be HW-dependent
+ */
+#define PRI_TOLERANCE	16
+
+/**
+ * struct radar_types - contains array of patterns defined for one DFS domain
+ * @domain: DFS regulatory domain
+ * @num_radar_types: number of radar types to follow
+ * @radar_types: radar types array
+ */
+struct radar_types {
+	enum nl80211_dfs_regions region;
+	u32 num_radar_types;
+	const struct radar_detector_specs *radar_types;
+};
+
+/* percentage on ppb threshold to trigger detection */
+#define MIN_PPB_THRESH	50
+#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
+#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
+
+#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB)	\
+{								\
+	ID, WMIN, WMAX, (PRF2PRI(PMAX) - PRI_TOLERANCE),	\
+	(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF,	\
+	PPB_THRESH(PPB), PRI_TOLERANCE,				\
+}
+
+/* radar types as defined by ETSI EN-301-893 v1.5.1 */
+static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
+	ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18),
+	ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10),
+	ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15),
+	ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25),
+	ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20),
+	ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10),
+	ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15),
+};
+
+static const struct radar_types etsi_radar_types_v15 = {
+	.region			= NL80211_DFS_ETSI,
+	.num_radar_types	= ARRAY_SIZE(etsi_radar_ref_types_v15),
+	.radar_types		= etsi_radar_ref_types_v15,
+};
+
+/* for now, we support ETSI radar types, FCC and JP are TODO */
+static const struct radar_types *dfs_domains[] = {
+	&etsi_radar_types_v15,
+};
+
+/**
+ * get_dfs_domain_radar_types() - get radar types for a given DFS domain
+ * @param domain DFS domain
+ * @return radar_types ptr on success, NULL if DFS domain is not supported
+ */
+static const struct radar_types *
+get_dfs_domain_radar_types(enum nl80211_dfs_regions region)
+{
+	u32 i;
+	for (i = 0; i < ARRAY_SIZE(dfs_domains); i++) {
+		if (dfs_domains[i]->region == region)
+			return dfs_domains[i];
+	}
+	return NULL;
+}
+
+/**
+ * struct channel_detector - detector elements for a DFS channel
+ * @head: list_head
+ * @freq: frequency for this channel detector in MHz
+ * @detectors: array of dynamically created detector elements for this freq
+ *
+ * Channel detectors are required to provide multi-channel DFS detection, e.g.
+ * to support off-channel scanning. A pattern detector has a list of channels
+ * radar pulses have been reported for in the past.
+ */
+struct channel_detector {
+	struct list_head head;
+	u16 freq;
+	struct pri_detector **detectors;
+};
+
+/* channel_detector_reset() - reset detector lines for a given channel */
|
||||||
|
static void channel_detector_reset(struct dfs_pattern_detector *dpd,
|
||||||
|
struct channel_detector *cd)
|
||||||
|
{
|
||||||
|
u32 i;
|
||||||
|
if (cd == NULL)
|
||||||
|
return;
|
||||||
|
for (i = 0; i < dpd->num_radar_types; i++)
|
||||||
|
cd->detectors[i]->reset(cd->detectors[i], dpd->last_pulse_ts);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* channel_detector_exit() - destructor */
|
||||||
|
static void channel_detector_exit(struct dfs_pattern_detector *dpd,
|
||||||
|
struct channel_detector *cd)
|
||||||
|
{
|
||||||
|
u32 i;
|
||||||
|
if (cd == NULL)
|
||||||
|
return;
|
||||||
|
list_del(&cd->head);
|
||||||
|
for (i = 0; i < dpd->num_radar_types; i++) {
|
||||||
|
struct pri_detector *de = cd->detectors[i];
|
||||||
|
if (de != NULL)
|
||||||
|
de->exit(de);
|
||||||
|
}
|
||||||
|
kfree(cd->detectors);
|
||||||
|
kfree(cd);
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct channel_detector *
|
||||||
|
channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
|
||||||
|
{
|
||||||
|
u32 sz, i;
|
||||||
|
struct channel_detector *cd;
|
||||||
|
|
||||||
|
cd = kmalloc(sizeof(*cd), GFP_KERNEL);
|
||||||
|
if (cd == NULL)
|
||||||
|
goto fail;
|
||||||
|
|
||||||
|
INIT_LIST_HEAD(&cd->head);
|
||||||
|
cd->freq = freq;
|
||||||
|
sz = sizeof(cd->detectors) * dpd->num_radar_types;
|
||||||
|
cd->detectors = kzalloc(sz, GFP_KERNEL);
|
||||||
|
if (cd->detectors == NULL)
|
||||||
|
goto fail;
|
||||||
|
|
||||||
|
for (i = 0; i < dpd->num_radar_types; i++) {
|
||||||
|
const struct radar_detector_specs *rs = &dpd->radar_spec[i];
|
||||||
|
struct pri_detector *de = pri_detector_init(rs);
|
||||||
|
if (de == NULL)
|
||||||
|
goto fail;
|
||||||
|
cd->detectors[i] = de;
|
||||||
|
}
|
||||||
|
list_add(&cd->head, &dpd->channel_detectors);
|
||||||
|
return cd;
|
||||||
|
|
||||||
|
fail:
|
||||||
|
pr_err("failed to allocate channel_detector for freq=%d\n", freq);
|
||||||
|
channel_detector_exit(dpd, cd);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* channel_detector_get() - get channel detector for given frequency
|
||||||
|
* @param dpd instance pointer
|
||||||
|
* @param freq frequency in MHz
|
||||||
|
* @return pointer to channel detector on success, NULL otherwise
|
||||||
|
*
|
||||||
|
* Return existing channel detector for the given frequency or return a
|
||||||
|
* newly create one.
|
||||||
|
*/
|
||||||
|
static struct channel_detector *
|
||||||
|
channel_detector_get(struct dfs_pattern_detector *dpd, u16 freq)
|
||||||
|
{
|
||||||
|
struct channel_detector *cd;
|
||||||
|
list_for_each_entry(cd, &dpd->channel_detectors, head) {
|
||||||
|
if (cd->freq == freq)
|
||||||
|
return cd;
|
||||||
|
}
|
||||||
|
return channel_detector_create(dpd, freq);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* DFS Pattern Detector
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* dpd_reset(): reset all channel detectors */
|
||||||
|
static void dpd_reset(struct dfs_pattern_detector *dpd)
|
||||||
|
{
|
||||||
|
struct channel_detector *cd;
|
||||||
|
if (!list_empty(&dpd->channel_detectors))
|
||||||
|
list_for_each_entry(cd, &dpd->channel_detectors, head)
|
||||||
|
channel_detector_reset(dpd, cd);
|
||||||
|
|
||||||
|
}
|
||||||
|
static void dpd_exit(struct dfs_pattern_detector *dpd)
|
||||||
|
{
|
||||||
|
struct channel_detector *cd, *cd0;
|
||||||
|
if (!list_empty(&dpd->channel_detectors))
|
||||||
|
list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
|
||||||
|
channel_detector_exit(dpd, cd);
|
||||||
|
kfree(dpd);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool
|
||||||
|
dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
|
||||||
|
{
|
||||||
|
u32 i;
|
||||||
|
bool ts_wraparound;
|
||||||
|
struct channel_detector *cd;
|
||||||
|
|
||||||
|
if (dpd->region == NL80211_DFS_UNSET) {
|
||||||
|
/*
|
||||||
|
* pulses received for a non-supported or un-initialized
|
||||||
|
* domain are treated as detected radars
|
||||||
|
*/
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
cd = channel_detector_get(dpd, event->freq);
|
||||||
|
if (cd == NULL)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
ts_wraparound = (event->ts < dpd->last_pulse_ts);
|
||||||
|
dpd->last_pulse_ts = event->ts;
|
||||||
|
if (ts_wraparound) {
|
||||||
|
/*
|
||||||
|
* reset detector on time stamp wraparound
|
||||||
|
* with monotonic time stamps, this should never happen
|
||||||
|
*/
|
||||||
|
pr_warn("DFS: time stamp wraparound detected, resetting\n");
|
||||||
|
dpd_reset(dpd);
|
||||||
|
}
|
||||||
|
/* do type individual pattern matching */
|
||||||
|
for (i = 0; i < dpd->num_radar_types; i++) {
|
||||||
|
if (cd->detectors[i]->add_pulse(cd->detectors[i], event) != 0) {
|
||||||
|
channel_detector_reset(dpd, cd);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
|
||||||
|
enum nl80211_dfs_regions region)
|
||||||
|
{
|
||||||
|
const struct radar_types *rt;
|
||||||
|
struct channel_detector *cd, *cd0;
|
||||||
|
|
||||||
|
if (dpd->region == region)
|
||||||
|
return true;
|
||||||
|
|
||||||
|
dpd->region = NL80211_DFS_UNSET;
|
||||||
|
|
||||||
|
rt = get_dfs_domain_radar_types(region);
|
||||||
|
if (rt == NULL)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
/* delete all channel detectors for previous DFS domain */
|
||||||
|
if (!list_empty(&dpd->channel_detectors))
|
||||||
|
list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
|
||||||
|
channel_detector_exit(dpd, cd);
|
||||||
|
dpd->radar_spec = rt->radar_types;
|
||||||
|
dpd->num_radar_types = rt->num_radar_types;
|
||||||
|
|
||||||
|
dpd->region = region;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct dfs_pattern_detector default_dpd = {
|
||||||
|
.exit = dpd_exit,
|
||||||
|
.set_domain = dpd_set_domain,
|
||||||
|
.add_pulse = dpd_add_pulse,
|
||||||
|
.region = NL80211_DFS_UNSET,
|
||||||
|
};
|
||||||
|
|
||||||
|
struct dfs_pattern_detector *
|
||||||
|
dfs_pattern_detector_init(enum nl80211_dfs_regions region)
|
||||||
|
{
|
||||||
|
struct dfs_pattern_detector *dpd;
|
||||||
|
dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
|
||||||
|
if (dpd == NULL) {
|
||||||
|
pr_err("allocation of dfs_pattern_detector failed\n");
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
*dpd = default_dpd;
|
||||||
|
INIT_LIST_HEAD(&dpd->channel_detectors);
|
||||||
|
|
||||||
|
if (dpd->set_domain(dpd, region))
|
||||||
|
return dpd;
|
||||||
|
|
||||||
|
pr_err("Could not set DFS domain to %d. ", region);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(dfs_pattern_detector_init);
|
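To make the ETSI table in the file above easier to read, here is the arithmetic the pattern macros perform for type 0; the numbers are computed from the macros shown in the new file, the summary itself is editorial and not part of the patch:

/*
 * ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18) with PRI_TOLERANCE = 16,
 * where PMIN/PMAX are pulse repetition frequencies in pulses per second:
 *
 *   PRF2PRI(700)   = (1000000 + 350) / 700 = 1429 us between pulses
 *   pri_min        = PRF2PRI(700) - 16     = 1413 us
 *   pri_max        = PRF2PRI(700) * 1 + 16 = 1445 us
 *   ppb            = 18 * 1                = 18 pulses per burst
 *   PPB_THRESH(18) = (18 * 50 + 50) / 100  = 9 pulses to trigger detection
 */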
new file: drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h (104 lines)
@@ -0,0 +1,104 @@
/*
 * Copyright (c) 2012 Neratec Solutions AG
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DFS_PATTERN_DETECTOR_H
#define DFS_PATTERN_DETECTOR_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/nl80211.h>

/**
 * struct pulse_event - describing pulses reported by PHY
 * @ts: pulse time stamp in us
 * @freq: channel frequency in MHz
 * @width: pulse duration in us
 * @rssi: rssi of radar event
 */
struct pulse_event {
	u64 ts;
	u16 freq;
	u8 width;
	u8 rssi;
};

/**
 * struct radar_detector_specs - detector specs for a radar pattern type
 * @type_id: pattern type, as defined by regulatory
 * @width_min: minimum radar pulse width in [us]
 * @width_max: maximum radar pulse width in [us]
 * @pri_min: minimum pulse repetition interval in [us] (including tolerance)
 * @pri_max: maximum pri in [us] (including tolerance)
 * @num_pri: maximum number of different pri for this type
 * @ppb: pulses per bursts for this type
 * @ppb_thresh: number of pulses required to trigger detection
 * @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
 */
struct radar_detector_specs {
	u8 type_id;
	u8 width_min;
	u8 width_max;
	u16 pri_min;
	u16 pri_max;
	u8 num_pri;
	u8 ppb;
	u8 ppb_thresh;
	u8 max_pri_tolerance;
};

/**
 * struct dfs_pattern_detector - DFS pattern detector
 * @exit(): destructor
 * @set_domain(): set DFS domain, resets detector lines upon domain changes
 * @add_pulse(): add radar pulse to detector, returns true on detection
 * @region: active DFS region, NL80211_DFS_UNSET until set
 * @num_radar_types: number of different radar types
 * @last_pulse_ts: time stamp of last valid pulse in usecs
 * @radar_detector_specs: array of radar detection specs
 * @channel_detectors: list connecting channel_detector elements
 */
struct dfs_pattern_detector {
	void (*exit)(struct dfs_pattern_detector *dpd);
	bool (*set_domain)(struct dfs_pattern_detector *dpd,
			   enum nl80211_dfs_regions region);
	bool (*add_pulse)(struct dfs_pattern_detector *dpd,
			  struct pulse_event *pe);

	enum nl80211_dfs_regions region;
	u8 num_radar_types;
	u64 last_pulse_ts;

	const struct radar_detector_specs *radar_spec;
	struct list_head channel_detectors;
};

/**
 * dfs_pattern_detector_init() - constructor for pattern detector class
 * @param region: DFS domain to be used, can be NL80211_DFS_UNSET at creation
 * @return instance pointer on success, NULL otherwise
 */
#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
extern struct dfs_pattern_detector *
dfs_pattern_detector_init(enum nl80211_dfs_regions region);
#else
static inline struct dfs_pattern_detector *
dfs_pattern_detector_init(enum nl80211_dfs_regions region)
{
	return NULL;
}
#endif /* CONFIG_ATH9K_DFS_CERTIFIED */

#endif /* DFS_PATTERN_DETECTOR_H */
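A lifecycle sketch under the interface above (editorial illustration, not from the patch; error handling is trimmed and the example_* function names are placeholders). The same create/destroy wiring appears later in this series in ath9k_init_softc() and ath9k_deinit_softc():

/* Illustrative detector lifecycle: create, set domain, feed pulses, destroy. */
static struct dfs_pattern_detector *example_dfs_setup(void)
{
	struct dfs_pattern_detector *dpd;

	/* NL80211_DFS_UNSET is a valid creation-time region */
	dpd = dfs_pattern_detector_init(NL80211_DFS_UNSET);
	if (dpd == NULL)	/* also the stub result without CONFIG_ATH9K_DFS_CERTIFIED */
		return NULL;

	/* switch to the real regulatory domain once it is known */
	if (!dpd->set_domain(dpd, NL80211_DFS_ETSI))
		pr_warn("DFS region not supported; pulses are reported as radar\n");

	return dpd;
}

static void example_dfs_teardown(struct dfs_pattern_detector *dpd)
{
	if (dpd != NULL)
		dpd->exit(dpd);	/* frees all per-channel detector state */
}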
new file: drivers/net/wireless/ath/ath9k/dfs_pri_detector.c (390 lines)
@@ -0,0 +1,390 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2012 Neratec Solutions AG
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and/or distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/slab.h>
|
||||||
|
|
||||||
|
#include "dfs_pattern_detector.h"
|
||||||
|
#include "dfs_pri_detector.h"
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct pri_sequence - sequence of pulses matching one PRI
|
||||||
|
* @head: list_head
|
||||||
|
* @pri: pulse repetition interval (PRI) in usecs
|
||||||
|
* @dur: duration of sequence in usecs
|
||||||
|
* @count: number of pulses in this sequence
|
||||||
|
* @count_falses: number of not matching pulses in this sequence
|
||||||
|
* @first_ts: time stamp of first pulse in usecs
|
||||||
|
* @last_ts: time stamp of last pulse in usecs
|
||||||
|
* @deadline_ts: deadline when this sequence becomes invalid (first_ts + dur)
|
||||||
|
*/
|
||||||
|
struct pri_sequence {
|
||||||
|
struct list_head head;
|
||||||
|
u32 pri;
|
||||||
|
u32 dur;
|
||||||
|
u32 count;
|
||||||
|
u32 count_falses;
|
||||||
|
u64 first_ts;
|
||||||
|
u64 last_ts;
|
||||||
|
u64 deadline_ts;
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* struct pulse_elem - elements in pulse queue
|
||||||
|
* @ts: time stamp in usecs
|
||||||
|
*/
|
||||||
|
struct pulse_elem {
|
||||||
|
struct list_head head;
|
||||||
|
u64 ts;
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* pde_get_multiple() - get number of multiples considering a given tolerance
|
||||||
|
* @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
|
||||||
|
*/
|
||||||
|
static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
|
||||||
|
{
|
||||||
|
u32 remainder;
|
||||||
|
u32 factor;
|
||||||
|
u32 delta;
|
||||||
|
|
||||||
|
if (fraction == 0)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
delta = (val < fraction) ? (fraction - val) : (val - fraction);
|
||||||
|
|
||||||
|
if (delta <= tolerance)
|
||||||
|
/* val and fraction are within tolerance */
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
factor = val / fraction;
|
||||||
|
remainder = val % fraction;
|
||||||
|
if (remainder > tolerance) {
|
||||||
|
/* no exact match */
|
||||||
|
if ((fraction - remainder) <= tolerance)
|
||||||
|
/* remainder is within tolerance */
|
||||||
|
factor++;
|
||||||
|
else
|
||||||
|
factor = 0;
|
||||||
|
}
|
||||||
|
return factor;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* DOC: Singleton Pulse and Sequence Pools
|
||||||
|
*
|
||||||
|
* Instances of pri_sequence and pulse_elem are kept in singleton pools to
|
||||||
|
* reduce the number of dynamic allocations. They are shared between all
|
||||||
|
* instances and grow up to the peak number of simultaneously used objects.
|
||||||
|
*
|
||||||
|
* Memory is freed after all references to the pools are released.
|
||||||
|
*/
|
||||||
|
static u32 singleton_pool_references;
|
||||||
|
static LIST_HEAD(pulse_pool);
|
||||||
|
static LIST_HEAD(pseq_pool);
|
||||||
|
|
||||||
|
static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
|
||||||
|
{
|
||||||
|
struct list_head *l = &pde->pulses;
|
||||||
|
if (list_empty(l))
|
||||||
|
return NULL;
|
||||||
|
return list_entry(l->prev, struct pulse_elem, head);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool pulse_queue_dequeue(struct pri_detector *pde)
|
||||||
|
{
|
||||||
|
struct pulse_elem *p = pulse_queue_get_tail(pde);
|
||||||
|
if (p != NULL) {
|
||||||
|
list_del_init(&p->head);
|
||||||
|
pde->count--;
|
||||||
|
/* give it back to pool */
|
||||||
|
list_add(&p->head, &pulse_pool);
|
||||||
|
}
|
||||||
|
return (pde->count > 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* remove pulses older than window */
|
||||||
|
static void pulse_queue_check_window(struct pri_detector *pde)
|
||||||
|
{
|
||||||
|
u64 min_valid_ts;
|
||||||
|
struct pulse_elem *p;
|
||||||
|
|
||||||
|
/* there is no delta time with less than 2 pulses */
|
||||||
|
if (pde->count < 2)
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (pde->last_ts <= pde->window_size)
|
||||||
|
return;
|
||||||
|
|
||||||
|
min_valid_ts = pde->last_ts - pde->window_size;
|
||||||
|
while ((p = pulse_queue_get_tail(pde)) != NULL) {
|
||||||
|
if (p->ts >= min_valid_ts)
|
||||||
|
return;
|
||||||
|
pulse_queue_dequeue(pde);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
|
||||||
|
{
|
||||||
|
struct pulse_elem *p;
|
||||||
|
if (!list_empty(&pulse_pool)) {
|
||||||
|
p = list_first_entry(&pulse_pool, struct pulse_elem, head);
|
||||||
|
list_del(&p->head);
|
||||||
|
} else {
|
||||||
|
p = kmalloc(sizeof(*p), GFP_KERNEL);
|
||||||
|
if (p == NULL) {
|
||||||
|
pr_err("failed to allocate pulse_elem\n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
INIT_LIST_HEAD(&p->head);
|
||||||
|
p->ts = ts;
|
||||||
|
list_add(&p->head, &pde->pulses);
|
||||||
|
pde->count++;
|
||||||
|
pde->last_ts = ts;
|
||||||
|
pulse_queue_check_window(pde);
|
||||||
|
if (pde->count >= pde->max_count)
|
||||||
|
pulse_queue_dequeue(pde);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool pseq_handler_create_sequences(struct pri_detector *pde,
|
||||||
|
u64 ts, u32 min_count)
|
||||||
|
{
|
||||||
|
struct pulse_elem *p;
|
||||||
|
list_for_each_entry(p, &pde->pulses, head) {
|
||||||
|
struct pri_sequence ps, *new_ps;
|
||||||
|
struct pulse_elem *p2;
|
||||||
|
u32 tmp_false_count;
|
||||||
|
u64 min_valid_ts;
|
||||||
|
u32 delta_ts = ts - p->ts;
|
||||||
|
|
||||||
|
if (delta_ts < pde->rs->pri_min)
|
||||||
|
/* ignore too small pri */
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (delta_ts > pde->rs->pri_max)
|
||||||
|
/* stop on too large pri (sorted list) */
|
||||||
|
break;
|
||||||
|
|
||||||
|
/* build a new sequence with new potential pri */
|
||||||
|
ps.count = 2;
|
||||||
|
ps.count_falses = 0;
|
||||||
|
ps.first_ts = p->ts;
|
||||||
|
ps.last_ts = ts;
|
||||||
|
ps.pri = ts - p->ts;
|
||||||
|
ps.dur = ps.pri * (pde->rs->ppb - 1)
|
||||||
|
+ 2 * pde->rs->max_pri_tolerance;
|
||||||
|
|
||||||
|
p2 = p;
|
||||||
|
tmp_false_count = 0;
|
||||||
|
min_valid_ts = ts - ps.dur;
|
||||||
|
/* check which past pulses are candidates for new sequence */
|
||||||
|
list_for_each_entry_continue(p2, &pde->pulses, head) {
|
||||||
|
u32 factor;
|
||||||
|
if (p2->ts < min_valid_ts)
|
||||||
|
/* stop on crossing window border */
|
||||||
|
break;
|
||||||
|
/* check if pulse match (multi)PRI */
|
||||||
|
factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
|
||||||
|
pde->rs->max_pri_tolerance);
|
||||||
|
if (factor > 0) {
|
||||||
|
ps.count++;
|
||||||
|
ps.first_ts = p2->ts;
|
||||||
|
/*
|
||||||
|
* on match, add the intermediate falses
|
||||||
|
* and reset counter
|
||||||
|
*/
|
||||||
|
ps.count_falses += tmp_false_count;
|
||||||
|
tmp_false_count = 0;
|
||||||
|
} else {
|
||||||
|
/* this is a potential false one */
|
||||||
|
tmp_false_count++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (ps.count < min_count)
|
||||||
|
/* did not reach minimum count, drop sequence */
|
||||||
|
continue;
|
||||||
|
|
||||||
|
/* this is a valid one, add it */
|
||||||
|
ps.deadline_ts = ps.first_ts + ps.dur;
|
||||||
|
|
||||||
|
if (!list_empty(&pseq_pool)) {
|
||||||
|
new_ps = list_first_entry(&pseq_pool,
|
||||||
|
struct pri_sequence, head);
|
||||||
|
list_del(&new_ps->head);
|
||||||
|
} else {
|
||||||
|
new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL);
|
||||||
|
if (new_ps == NULL)
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
memcpy(new_ps, &ps, sizeof(ps));
|
||||||
|
INIT_LIST_HEAD(&new_ps->head);
|
||||||
|
list_add(&new_ps->head, &pde->sequences);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* check new ts and add to all matching existing sequences */
|
||||||
|
static u32
|
||||||
|
pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
|
||||||
|
{
|
||||||
|
u32 max_count = 0;
|
||||||
|
struct pri_sequence *ps, *ps2;
|
||||||
|
list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
|
||||||
|
u32 delta_ts;
|
||||||
|
u32 factor;
|
||||||
|
|
||||||
|
/* first ensure that sequence is within window */
|
||||||
|
if (ts > ps->deadline_ts) {
|
||||||
|
list_del_init(&ps->head);
|
||||||
|
list_add(&ps->head, &pseq_pool);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
delta_ts = ts - ps->last_ts;
|
||||||
|
factor = pde_get_multiple(delta_ts, ps->pri,
|
||||||
|
pde->rs->max_pri_tolerance);
|
||||||
|
if (factor > 0) {
|
||||||
|
ps->last_ts = ts;
|
||||||
|
ps->count++;
|
||||||
|
|
||||||
|
if (max_count < ps->count)
|
||||||
|
max_count = ps->count;
|
||||||
|
} else {
|
||||||
|
ps->count_falses++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return max_count;
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct pri_sequence *
|
||||||
|
pseq_handler_check_detection(struct pri_detector *pde)
|
||||||
|
{
|
||||||
|
struct pri_sequence *ps;
|
||||||
|
|
||||||
|
if (list_empty(&pde->sequences))
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
list_for_each_entry(ps, &pde->sequences, head) {
|
||||||
|
/*
|
||||||
|
* we assume to have enough matching confidence if we
|
||||||
|
* 1) have enough pulses
|
||||||
|
* 2) have more matching than false pulses
|
||||||
|
*/
|
||||||
|
if ((ps->count >= pde->rs->ppb_thresh) &&
|
||||||
|
(ps->count * pde->rs->num_pri >= ps->count_falses))
|
||||||
|
return ps;
|
||||||
|
}
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* free pulse queue and sequences list and give objects back to pools */
|
||||||
|
static void pri_detector_reset(struct pri_detector *pde, u64 ts)
|
||||||
|
{
|
||||||
|
struct pri_sequence *ps, *ps0;
|
||||||
|
struct pulse_elem *p, *p0;
|
||||||
|
list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
|
||||||
|
list_del_init(&ps->head);
|
||||||
|
list_add(&ps->head, &pseq_pool);
|
||||||
|
}
|
||||||
|
list_for_each_entry_safe(p, p0, &pde->pulses, head) {
|
||||||
|
list_del_init(&p->head);
|
||||||
|
list_add(&p->head, &pulse_pool);
|
||||||
|
}
|
||||||
|
pde->count = 0;
|
||||||
|
pde->last_ts = ts;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void pri_detector_exit(struct pri_detector *de)
|
||||||
|
{
|
||||||
|
pri_detector_reset(de, 0);
|
||||||
|
|
||||||
|
singleton_pool_references--;
|
||||||
|
if (singleton_pool_references == 0) {
|
||||||
|
/* free singleton pools with no references left */
|
||||||
|
struct pri_sequence *ps, *ps0;
|
||||||
|
struct pulse_elem *p, *p0;
|
||||||
|
|
||||||
|
list_for_each_entry_safe(p, p0, &pulse_pool, head) {
|
||||||
|
list_del(&p->head);
|
||||||
|
kfree(p);
|
||||||
|
}
|
||||||
|
list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
|
||||||
|
list_del(&ps->head);
|
||||||
|
kfree(ps);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
kfree(de);
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool pri_detector_add_pulse(struct pri_detector *de,
|
||||||
|
struct pulse_event *event)
|
||||||
|
{
|
||||||
|
u32 max_updated_seq;
|
||||||
|
struct pri_sequence *ps;
|
||||||
|
u64 ts = event->ts;
|
||||||
|
const struct radar_detector_specs *rs = de->rs;
|
||||||
|
|
||||||
|
/* ignore pulses not within width range */
|
||||||
|
if ((rs->width_min > event->width) || (rs->width_max < event->width))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if ((ts - de->last_ts) < rs->max_pri_tolerance)
|
||||||
|
/* if delta to last pulse is too short, don't use this pulse */
|
||||||
|
return false;
|
||||||
|
de->last_ts = ts;
|
||||||
|
|
||||||
|
max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
|
||||||
|
|
||||||
|
if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
|
||||||
|
pr_err("failed to create pulse sequences\n");
|
||||||
|
pri_detector_reset(de, ts);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
ps = pseq_handler_check_detection(de);
|
||||||
|
|
||||||
|
if (ps != NULL) {
|
||||||
|
pr_info("DFS: radar found: pri=%d, count=%d, count_false=%d\n",
|
||||||
|
ps->pri, ps->count, ps->count_falses);
|
||||||
|
pri_detector_reset(de, ts);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
pulse_queue_enqueue(de, ts);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct pri_detector *
|
||||||
|
pri_detector_init(const struct radar_detector_specs *rs)
|
||||||
|
{
|
||||||
|
struct pri_detector *de;
|
||||||
|
de = kzalloc(sizeof(*de), GFP_KERNEL);
|
||||||
|
if (de == NULL)
|
||||||
|
return NULL;
|
||||||
|
de->exit = pri_detector_exit;
|
||||||
|
de->add_pulse = pri_detector_add_pulse;
|
||||||
|
de->reset = pri_detector_reset;
|
||||||
|
|
||||||
|
INIT_LIST_HEAD(&de->sequences);
|
||||||
|
INIT_LIST_HEAD(&de->pulses);
|
||||||
|
de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
|
||||||
|
de->max_count = rs->ppb * 2;
|
||||||
|
de->rs = rs;
|
||||||
|
|
||||||
|
singleton_pool_references++;
|
||||||
|
return de;
|
||||||
|
}
|
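The core matching rule of the new PRI detector is pde_get_multiple(): a pulse extends a candidate sequence if its distance from the sequence matches an integer multiple of the candidate PRI within the configured tolerance. A standalone userspace illustration of that rule with ETSI type 0 numbers (written for this write-up, not part of the patch):

/*
 * Standalone illustration of the multiple-PRI matching rule used by
 * pde_get_multiple() above: val matches if it is within +/- tolerance
 * of factor * fraction for some integer factor.
 */
#include <assert.h>

static unsigned int get_multiple(unsigned int val, unsigned int fraction,
				 unsigned int tolerance)
{
	unsigned int remainder, factor, delta;

	if (fraction == 0)
		return 0;

	delta = (val < fraction) ? (fraction - val) : (val - fraction);
	if (delta <= tolerance)
		return 1;			/* within one PRI */

	factor = val / fraction;
	remainder = val % fraction;
	if (remainder > tolerance) {
		if ((fraction - remainder) <= tolerance)
			factor++;		/* just below the next multiple */
		else
			factor = 0;		/* no match */
	}
	return factor;
}

int main(void)
{
	/* PRI 1429 us (ETSI type 0), tolerance 16 us */
	assert(get_multiple(1429, 1429, 16) == 1);	/* exact match       */
	assert(get_multiple(2858, 1429, 16) == 2);	/* one pulse missed  */
	assert(get_multiple(2870, 1429, 16) == 2);	/* 12 us off, ok     */
	assert(get_multiple(2900, 1429, 16) == 0);	/* 42 us off, reject */
	return 0;
}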
new file: drivers/net/wireless/ath/ath9k/dfs_pri_detector.h (52 lines)
@@ -0,0 +1,52 @@
/*
 * Copyright (c) 2012 Neratec Solutions AG
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef DFS_PRI_DETECTOR_H
#define DFS_PRI_DETECTOR_H

#include <linux/list.h>

/**
 * struct pri_detector - PRI detector element for a dedicated radar type
 * @exit(): destructor
 * @add_pulse(): add pulse event, returns true if pattern was detected
 * @reset(): clear states and reset to given time stamp
 * @rs: detector specs for this detector element
 * @last_ts: last pulse time stamp considered for this element in usecs
 * @sequences: list_head holding potential pulse sequences
 * @pulses: list connecting pulse_elem objects
 * @count: number of pulses in queue
 * @max_count: maximum number of pulses to be queued
 * @window_size: window size back from newest pulse time stamp in usecs
 */
struct pri_detector {
	void (*exit)     (struct pri_detector *de);
	bool (*add_pulse)(struct pri_detector *de, struct pulse_event *e);
	void (*reset)    (struct pri_detector *de, u64 ts);

/* private: internal use only */
	const struct radar_detector_specs *rs;
	u64 last_ts;
	struct list_head sequences;
	struct list_head pulses;
	u32 count;
	u32 max_count;
	u32 window_size;
};

struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs);

#endif /* DFS_PRI_DETECTOR_H */
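For scale, the sizing fields above are set in pri_detector_init() in the new .c file as window_size = pri_max * ppb * num_pri and max_count = ppb * 2; with the ETSI type 0 numbers this works out roughly as follows (editorial illustration):

/*
 * ETSI type 0: pri_max = 1445 us, ppb = 18, num_pri = 1
 *
 *   window_size = 1445 * 18 * 1 = 26010 us  (how far back pulses are kept)
 *   max_count   = 18 * 2        = 36 pulses (hard cap on the pulse queue)
 */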
@ -14,6 +14,8 @@
|
|||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||||
|
|
||||||
#include "htc.h"
|
#include "htc.h"
|
||||||
|
|
||||||
MODULE_AUTHOR("Atheros Communications");
|
MODULE_AUTHOR("Atheros Communications");
|
||||||
@ -711,7 +713,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
|
|||||||
|
|
||||||
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
|
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
|
||||||
|
|
||||||
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
|
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
|
||||||
|
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
|
||||||
|
|
||||||
hw->queues = 4;
|
hw->queues = 4;
|
||||||
hw->channel_change_time = 5000;
|
hw->channel_change_time = 5000;
|
||||||
@ -966,9 +969,7 @@ int ath9k_htc_resume(struct htc_target *htc_handle)
|
|||||||
static int __init ath9k_htc_init(void)
|
static int __init ath9k_htc_init(void)
|
||||||
{
|
{
|
||||||
if (ath9k_hif_usb_init() < 0) {
|
if (ath9k_hif_usb_init() < 0) {
|
||||||
printk(KERN_ERR
|
pr_err("No USB devices found, driver not installed\n");
|
||||||
"ath9k_htc: No USB devices found,"
|
|
||||||
" driver not installed.\n");
|
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -979,6 +980,6 @@ module_init(ath9k_htc_init);
|
|||||||
static void __exit ath9k_htc_exit(void)
|
static void __exit ath9k_htc_exit(void)
|
||||||
{
|
{
|
||||||
ath9k_hif_usb_exit();
|
ath9k_hif_usb_exit();
|
||||||
printk(KERN_INFO "ath9k_htc: Driver unloaded\n");
|
pr_info("Driver unloaded\n");
|
||||||
}
|
}
|
||||||
module_exit(ath9k_htc_exit);
|
module_exit(ath9k_htc_exit);
|
||||||
|
@ -14,6 +14,8 @@
|
|||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||||
|
|
||||||
#include "htc.h"
|
#include "htc.h"
|
||||||
|
|
||||||
static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
|
static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
|
||||||
@ -461,7 +463,7 @@ int ath9k_htc_hw_init(struct htc_target *target,
|
|||||||
char *product, u32 drv_info)
|
char *product, u32 drv_info)
|
||||||
{
|
{
|
||||||
if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) {
|
if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) {
|
||||||
printk(KERN_ERR "Failed to initialize the device\n");
|
pr_err("Failed to initialize the device\n");
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1491,11 +1491,84 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
 	}
 }

+static bool ath9k_hw_check_dcs(u32 dma_dbg, u32 num_dcu_states,
+			       int *hang_state, int *hang_pos)
+{
+	static u32 dcu_chain_state[] = {5, 6, 9}; /* DCU chain stuck states */
+	u32 chain_state, dcs_pos, i;
+
+	for (dcs_pos = 0; dcs_pos < num_dcu_states; dcs_pos++) {
+		chain_state = (dma_dbg >> (5 * dcs_pos)) & 0x1f;
+		for (i = 0; i < 3; i++) {
+			if (chain_state == dcu_chain_state[i]) {
+				*hang_state = chain_state;
+				*hang_pos = dcs_pos;
+				return true;
+			}
+		}
+	}
+	return false;
+}
+
+#define DCU_COMPLETE_STATE	1
+#define DCU_COMPLETE_STATE_MASK 0x3
+#define NUM_STATUS_READS	50
+static bool ath9k_hw_detect_mac_hang(struct ath_hw *ah)
+{
+	u32 chain_state, comp_state, dcs_reg = AR_DMADBG_4;
+	u32 i, hang_pos, hang_state, num_state = 6;
+
+	comp_state = REG_READ(ah, AR_DMADBG_6);
+
+	if ((comp_state & DCU_COMPLETE_STATE_MASK) != DCU_COMPLETE_STATE) {
+		ath_dbg(ath9k_hw_common(ah), RESET,
+			"MAC Hang signature not found at DCU complete\n");
+		return false;
+	}
+
+	chain_state = REG_READ(ah, dcs_reg);
+	if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
+		goto hang_check_iter;
+
+	dcs_reg = AR_DMADBG_5;
+	num_state = 4;
+	chain_state = REG_READ(ah, dcs_reg);
+	if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
+		goto hang_check_iter;
+
+	ath_dbg(ath9k_hw_common(ah), RESET,
+		"MAC Hang signature 1 not found\n");
+	return false;
+
+hang_check_iter:
+	ath_dbg(ath9k_hw_common(ah), RESET,
+		"DCU registers: chain %08x complete %08x Hang: state %d pos %d\n",
+		chain_state, comp_state, hang_state, hang_pos);
+
+	for (i = 0; i < NUM_STATUS_READS; i++) {
+		chain_state = REG_READ(ah, dcs_reg);
+		chain_state = (chain_state >> (5 * hang_pos)) & 0x1f;
+		comp_state = REG_READ(ah, AR_DMADBG_6);
+
+		if (((comp_state & DCU_COMPLETE_STATE_MASK) !=
+		     DCU_COMPLETE_STATE) ||
+		    (chain_state != hang_state))
+			return false;
+	}
+
+	ath_dbg(ath9k_hw_common(ah), RESET, "MAC Hang signature 1 found\n");
+
+	return true;
+}
+
 bool ath9k_hw_check_alive(struct ath_hw *ah)
 {
 	int count = 50;
 	u32 reg;

+	if (AR_SREV_9300(ah))
+		return !ath9k_hw_detect_mac_hang(ah);
+
 	if (AR_SREV_9285_12_OR_LATER(ah))
 		return true;

@ -14,6 +14,8 @@
|
|||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||||
|
|
||||||
#include <linux/dma-mapping.h>
|
#include <linux/dma-mapping.h>
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
#include <linux/ath9k_platform.h>
|
#include <linux/ath9k_platform.h>
|
||||||
@ -519,6 +521,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
|
|||||||
atomic_set(&ah->intr_ref_cnt, -1);
|
atomic_set(&ah->intr_ref_cnt, -1);
|
||||||
sc->sc_ah = ah;
|
sc->sc_ah = ah;
|
||||||
|
|
||||||
|
sc->dfs_detector = dfs_pattern_detector_init(NL80211_DFS_UNSET);
|
||||||
|
|
||||||
if (!pdata) {
|
if (!pdata) {
|
||||||
ah->ah_flags |= AH_USE_EEPROM;
|
ah->ah_flags |= AH_USE_EEPROM;
|
||||||
sc->sc_ah->led_pin = -1;
|
sc->sc_ah->led_pin = -1;
|
||||||
@ -676,6 +680,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
|
|||||||
|
|
||||||
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
|
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
|
||||||
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
|
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
|
||||||
|
hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
|
||||||
|
|
||||||
hw->queues = 4;
|
hw->queues = 4;
|
||||||
hw->max_rates = 4;
|
hw->max_rates = 4;
|
||||||
@ -779,6 +784,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
|
|||||||
goto error_world;
|
goto error_world;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
|
||||||
sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
|
sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
|
||||||
|
|
||||||
ath_init_leds(sc);
|
ath_init_leds(sc);
|
||||||
@ -821,6 +827,8 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
|
|||||||
ath_tx_cleanupq(sc, &sc->tx.txq[i]);
|
ath_tx_cleanupq(sc, &sc->tx.txq[i]);
|
||||||
|
|
||||||
ath9k_hw_deinit(sc->sc_ah);
|
ath9k_hw_deinit(sc->sc_ah);
|
||||||
|
if (sc->dfs_detector != NULL)
|
||||||
|
sc->dfs_detector->exit(sc->dfs_detector);
|
||||||
|
|
||||||
kfree(sc->sc_ah);
|
kfree(sc->sc_ah);
|
||||||
sc->sc_ah = NULL;
|
sc->sc_ah = NULL;
|
||||||
@ -866,17 +874,14 @@ static int __init ath9k_init(void)
|
|||||||
/* Register rate control algorithm */
|
/* Register rate control algorithm */
|
||||||
error = ath_rate_control_register();
|
error = ath_rate_control_register();
|
||||||
if (error != 0) {
|
if (error != 0) {
|
||||||
printk(KERN_ERR
|
pr_err("Unable to register rate control algorithm: %d\n",
|
||||||
"ath9k: Unable to register rate control "
|
|
||||||
"algorithm: %d\n",
|
|
||||||
error);
|
error);
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
error = ath_pci_init();
|
error = ath_pci_init();
|
||||||
if (error < 0) {
|
if (error < 0) {
|
||||||
printk(KERN_ERR
|
pr_err("No PCI devices found, driver not installed\n");
|
||||||
"ath9k: No PCI devices found, driver not installed.\n");
|
|
||||||
error = -ENODEV;
|
error = -ENODEV;
|
||||||
goto err_rate_unregister;
|
goto err_rate_unregister;
|
||||||
}
|
}
|
||||||
@ -905,6 +910,6 @@ static void __exit ath9k_exit(void)
|
|||||||
ath_ahb_exit();
|
ath_ahb_exit();
|
||||||
ath_pci_exit();
|
ath_pci_exit();
|
||||||
ath_rate_control_unregister();
|
ath_rate_control_unregister();
|
||||||
printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
|
pr_info("%s: Driver unloaded\n", dev_info);
|
||||||
}
|
}
|
||||||
module_exit(ath9k_exit);
|
module_exit(ath9k_exit);
|
||||||
|
@ -118,15 +118,13 @@ void ath9k_ps_restore(struct ath_softc *sc)
|
|||||||
if (--sc->ps_usecount != 0)
|
if (--sc->ps_usecount != 0)
|
||||||
goto unlock;
|
goto unlock;
|
||||||
|
|
||||||
if (sc->ps_flags & PS_WAIT_FOR_TX_ACK)
|
if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
|
||||||
goto unlock;
|
|
||||||
|
|
||||||
if (sc->ps_idle)
|
|
||||||
mode = ATH9K_PM_FULL_SLEEP;
|
mode = ATH9K_PM_FULL_SLEEP;
|
||||||
else if (sc->ps_enabled &&
|
else if (sc->ps_enabled &&
|
||||||
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
|
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
|
||||||
PS_WAIT_FOR_CAB |
|
PS_WAIT_FOR_CAB |
|
||||||
PS_WAIT_FOR_PSPOLL_DATA)))
|
PS_WAIT_FOR_PSPOLL_DATA |
|
||||||
|
PS_WAIT_FOR_TX_ACK)))
|
||||||
mode = ATH9K_PM_NETWORK_SLEEP;
|
mode = ATH9K_PM_NETWORK_SLEEP;
|
||||||
else
|
else
|
||||||
goto unlock;
|
goto unlock;
|
||||||
@ -243,6 +241,7 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
|
|||||||
|
|
||||||
sc->hw_busy_count = 0;
|
sc->hw_busy_count = 0;
|
||||||
del_timer_sync(&common->ani.timer);
|
del_timer_sync(&common->ani.timer);
|
||||||
|
del_timer_sync(&sc->rx_poll_timer);
|
||||||
|
|
||||||
ath9k_debug_samp_bb_mac(sc);
|
ath9k_debug_samp_bb_mac(sc);
|
||||||
ath9k_hw_disable_interrupts(ah);
|
ath9k_hw_disable_interrupts(ah);
|
||||||
@ -284,6 +283,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
|
|||||||
|
|
||||||
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
|
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
|
||||||
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
|
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
|
||||||
|
ath_start_rx_poll(sc, 3);
|
||||||
if (!common->disable_ani)
|
if (!common->disable_ani)
|
||||||
ath_start_ani(common);
|
ath_start_ani(common);
|
||||||
}
|
}
|
||||||
@ -914,10 +914,19 @@ void ath_hw_check(struct work_struct *work)
|
|||||||
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
|
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
int busy;
|
int busy;
|
||||||
|
u8 is_alive, nbeacon = 1;
|
||||||
|
|
||||||
ath9k_ps_wakeup(sc);
|
ath9k_ps_wakeup(sc);
|
||||||
if (ath9k_hw_check_alive(sc->sc_ah))
|
is_alive = ath9k_hw_check_alive(sc->sc_ah);
|
||||||
|
|
||||||
|
if (is_alive && !AR_SREV_9300(sc->sc_ah))
|
||||||
goto out;
|
goto out;
|
||||||
|
else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
|
||||||
|
ath_dbg(common, RESET,
|
||||||
|
"DCU stuck is detected. Schedule chip reset\n");
|
||||||
|
RESET_STAT_INC(sc, RESET_TYPE_MAC_HANG);
|
||||||
|
goto sched_reset;
|
||||||
|
}
|
||||||
|
|
||||||
spin_lock_irqsave(&common->cc_lock, flags);
|
spin_lock_irqsave(&common->cc_lock, flags);
|
||||||
busy = ath_update_survey_stats(sc);
|
busy = ath_update_survey_stats(sc);
|
||||||
@ -928,12 +937,18 @@ void ath_hw_check(struct work_struct *work)
|
|||||||
if (busy >= 99) {
|
if (busy >= 99) {
|
||||||
if (++sc->hw_busy_count >= 3) {
|
if (++sc->hw_busy_count >= 3) {
|
||||||
RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
|
RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
|
||||||
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
|
goto sched_reset;
|
||||||
|
}
|
||||||
|
} else if (busy >= 0) {
|
||||||
|
sc->hw_busy_count = 0;
|
||||||
|
nbeacon = 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
} else if (busy >= 0)
|
ath_start_rx_poll(sc, nbeacon);
|
||||||
sc->hw_busy_count = 0;
|
goto out;
|
||||||
|
|
||||||
|
sched_reset:
|
||||||
|
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
|
||||||
out:
|
out:
|
||||||
ath9k_ps_restore(sc);
|
ath9k_ps_restore(sc);
|
||||||
}
|
}
|
||||||
@ -1135,6 +1150,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
|
|||||||
|
|
||||||
if (ath_tx_start(hw, skb, &txctl) != 0) {
|
if (ath_tx_start(hw, skb, &txctl) != 0) {
|
||||||
ath_dbg(common, XMIT, "TX failed\n");
|
ath_dbg(common, XMIT, "TX failed\n");
|
||||||
|
TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1153,6 +1169,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
|
|||||||
mutex_lock(&sc->mutex);
|
mutex_lock(&sc->mutex);
|
||||||
|
|
||||||
ath_cancel_work(sc);
|
ath_cancel_work(sc);
|
||||||
|
del_timer_sync(&sc->rx_poll_timer);
|
||||||
|
|
||||||
if (sc->sc_flags & SC_OP_INVALID) {
|
if (sc->sc_flags & SC_OP_INVALID) {
|
||||||
ath_dbg(common, ANY, "Device not present\n");
|
ath_dbg(common, ANY, "Device not present\n");
|
||||||
@ -1385,6 +1402,24 @@ static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
|
||||||
|
{
|
||||||
|
if (!AR_SREV_9300(sc->sc_ah))
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF))
|
||||||
|
return;
|
||||||
|
|
||||||
|
mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
|
||||||
|
(nbeacon * sc->cur_beacon_conf.beacon_interval));
|
||||||
|
}
|
||||||
|
|
||||||
|
void ath_rx_poll(unsigned long data)
|
||||||
|
{
|
||||||
|
struct ath_softc *sc = (struct ath_softc *)data;
|
||||||
|
|
||||||
|
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
|
||||||
|
}
|
||||||
|
|
||||||
static int ath9k_add_interface(struct ieee80211_hw *hw,
|
static int ath9k_add_interface(struct ieee80211_hw *hw,
|
||||||
struct ieee80211_vif *vif)
|
struct ieee80211_vif *vif)
|
||||||
@ -1906,6 +1941,8 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
|
|||||||
sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
|
sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
|
||||||
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
|
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
|
||||||
|
|
||||||
|
ath_start_rx_poll(sc, 3);
|
||||||
|
|
||||||
if (!common->disable_ani) {
|
if (!common->disable_ani) {
|
||||||
sc->sc_flags |= SC_OP_ANI_RUN;
|
sc->sc_flags |= SC_OP_ANI_RUN;
|
||||||
ath_start_ani(common);
|
ath_start_ani(common);
|
||||||
@ -1945,6 +1982,7 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
|
|||||||
/* Stop ANI */
|
/* Stop ANI */
|
||||||
sc->sc_flags &= ~SC_OP_ANI_RUN;
|
sc->sc_flags &= ~SC_OP_ANI_RUN;
|
||||||
del_timer_sync(&common->ani.timer);
|
del_timer_sync(&common->ani.timer);
|
||||||
|
del_timer_sync(&sc->rx_poll_timer);
|
||||||
memset(&sc->caldata, 0, sizeof(sc->caldata));
|
memset(&sc->caldata, 0, sizeof(sc->caldata));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1988,6 +2026,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
|
|||||||
} else {
|
} else {
|
||||||
sc->sc_flags &= ~SC_OP_ANI_RUN;
|
sc->sc_flags &= ~SC_OP_ANI_RUN;
|
||||||
del_timer_sync(&common->ani.timer);
|
del_timer_sync(&common->ani.timer);
|
||||||
|
del_timer_sync(&sc->rx_poll_timer);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -14,6 +14,8 @@
|
|||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||||
|
|
||||||
#include <linux/nl80211.h>
|
#include <linux/nl80211.h>
|
||||||
#include <linux/pci.h>
|
#include <linux/pci.h>
|
||||||
#include <linux/pci-aspm.h>
|
#include <linux/pci-aspm.h>
|
||||||
@ -171,14 +173,13 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||||||
|
|
||||||
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
||||||
if (ret) {
|
if (ret) {
|
||||||
printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
|
pr_err("32-bit DMA not available\n");
|
||||||
goto err_dma;
|
goto err_dma;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
|
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
|
||||||
if (ret) {
|
if (ret) {
|
||||||
printk(KERN_ERR "ath9k: 32-bit DMA consistent "
|
pr_err("32-bit DMA consistent DMA enable failed\n");
|
||||||
"DMA enable failed\n");
|
|
||||||
goto err_dma;
|
goto err_dma;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -224,7 +225,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||||||
|
|
||||||
mem = pci_iomap(pdev, 0, 0);
|
mem = pci_iomap(pdev, 0, 0);
|
||||||
if (!mem) {
|
if (!mem) {
|
||||||
printk(KERN_ERR "PCI memory map error\n") ;
|
pr_err("PCI memory map error\n") ;
|
||||||
ret = -EIO;
|
ret = -EIO;
|
||||||
goto err_iomap;
|
goto err_iomap;
|
||||||
}
|
}
|
||||||
|
@ -1436,7 +1436,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
|
|||||||
|
|
||||||
static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
|
static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
|
||||||
struct ieee80211_sta *sta, void *priv_sta,
|
struct ieee80211_sta *sta, void *priv_sta,
|
||||||
u32 changed, enum nl80211_channel_type oper_chan_type)
|
u32 changed)
|
||||||
{
|
{
|
||||||
struct ath_softc *sc = priv;
|
struct ath_softc *sc = priv;
|
||||||
struct ath_rate_priv *ath_rc_priv = priv_sta;
|
struct ath_rate_priv *ath_rc_priv = priv_sta;
|
||||||
@ -1447,12 +1447,11 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
|
|||||||
|
|
||||||
/* FIXME: Handle AP mode later when we support CWM */
|
/* FIXME: Handle AP mode later when we support CWM */
|
||||||
|
|
||||||
if (changed & IEEE80211_RC_HT_CHANGED) {
|
if (changed & IEEE80211_RC_BW_CHANGED) {
|
||||||
if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
|
if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (oper_chan_type == NL80211_CHAN_HT40MINUS ||
|
if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
|
||||||
oper_chan_type == NL80211_CHAN_HT40PLUS)
|
|
||||||
oper_cw40 = true;
|
oper_cw40 = true;
|
||||||
|
|
||||||
if (oper_cw40)
|
if (oper_cw40)
|
||||||
|
@ -824,15 +824,20 @@ static bool ath9k_rx_accept(struct ath_common *common,
|
|||||||
if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
|
if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
|
||||||
rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
|
rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
|
||||||
|
|
||||||
if (!rx_stats->rs_datalen)
|
if (!rx_stats->rs_datalen) {
|
||||||
|
RX_STAT_INC(rx_len_err);
|
||||||
return false;
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* rs_status follows rs_datalen so if rs_datalen is too large
|
* rs_status follows rs_datalen so if rs_datalen is too large
|
||||||
* we can take a hint that hardware corrupted it, so ignore
|
* we can take a hint that hardware corrupted it, so ignore
|
||||||
* those frames.
|
* those frames.
|
||||||
*/
|
*/
|
||||||
if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
|
if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
|
||||||
|
RX_STAT_INC(rx_len_err);
|
||||||
return false;
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
/* Only use error bits from the last fragment */
|
/* Only use error bits from the last fragment */
|
||||||
if (rx_stats->rs_more)
|
if (rx_stats->rs_more)
|
||||||
@ -902,6 +907,7 @@ static int ath9k_process_rate(struct ath_common *common,
|
|||||||
struct ieee80211_supported_band *sband;
|
struct ieee80211_supported_band *sband;
|
||||||
enum ieee80211_band band;
|
enum ieee80211_band band;
|
||||||
unsigned int i = 0;
|
unsigned int i = 0;
|
||||||
|
struct ath_softc *sc = (struct ath_softc *) common->priv;
|
||||||
|
|
||||||
band = hw->conf.channel->band;
|
band = hw->conf.channel->band;
|
||||||
sband = hw->wiphy->bands[band];
|
sband = hw->wiphy->bands[band];
|
||||||
@ -936,7 +942,7 @@ static int ath9k_process_rate(struct ath_common *common,
|
|||||||
ath_dbg(common, ANY,
|
ath_dbg(common, ANY,
|
||||||
"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
|
"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
|
||||||
rx_stats->rs_rate);
|
rx_stats->rs_rate);
|
||||||
|
RX_STAT_INC(rx_rate_err);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1823,12 +1829,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
|
|||||||
|
|
||||||
hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
|
hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
|
||||||
rxs = IEEE80211_SKB_RXCB(hdr_skb);
|
 rxs = IEEE80211_SKB_RXCB(hdr_skb);
-if (ieee80211_is_beacon(hdr->frame_control) &&
-!is_zero_ether_addr(common->curbssid) &&
+if (ieee80211_is_beacon(hdr->frame_control)) {
+RX_STAT_INC(rx_beacons);
+if (!is_zero_ether_addr(common->curbssid) &&
 !compare_ether_addr(hdr->addr3, common->curbssid))
 rs.is_mybeacon = true;
 else
 rs.is_mybeacon = false;
+}
+else
+rs.is_mybeacon = false;

 ath_debug_stat_rx(sc, &rs);

@@ -1836,8 +1846,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 * If we're asked to flush receive queue, directly
 * chain it back at the queue without processing it.
 */
-if (sc->sc_flags & SC_OP_RXFLUSH)
+if (sc->sc_flags & SC_OP_RXFLUSH) {
+RX_STAT_INC(rx_drop_rxflush);
 goto requeue_drop_frag;
+}

 memset(rxs, 0, sizeof(struct ieee80211_rx_status));

@@ -1855,6 +1867,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 if (retval)
 goto requeue_drop_frag;

+if (rs.is_mybeacon) {
+sc->hw_busy_count = 0;
+ath_start_rx_poll(sc, 3);
+}
 /* Ensure we always have an skb to requeue once we are done
 * processing the current buffer's skb */
 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1863,8 +1879,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 * tell hardware it can give us a new frame using the old
 * skb and put it at the tail of the sc->rx.rxbuf list for
 * processing. */
-if (!requeue_skb)
+if (!requeue_skb) {
+RX_STAT_INC(rx_oom_err);
 goto requeue_drop_frag;
+}

 /* Unmap the frame */
 dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -1895,6 +1913,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 }

 if (rs.rs_more) {
+RX_STAT_INC(rx_frags);
 /*
 * rs_more indicates chained descriptors which can be
 * used to link buffers together for a sort of
@@ -1904,6 +1923,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 /* too many fragments - cannot handle frame */
 dev_kfree_skb_any(sc->rx.frag);
 dev_kfree_skb_any(skb);
+RX_STAT_INC(rx_too_many_frags_err);
 skb = NULL;
 }
 sc->rx.frag = skb;
@@ -1915,6 +1935,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)

 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
 dev_kfree_skb(skb);
+RX_STAT_INC(rx_oom_err);
 goto requeue_drop_frag;
 }
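For context on the RX_STAT_INC() calls added in the hunks above: ath9k keeps per-condition receive counters in its debug statistics and bumps one counter per drop or event path. The following is only an illustrative, self-contained sketch of that counting pattern; the struct fields and the macro body here are hypothetical stand-ins, not the driver's actual definitions.

#include <stdio.h>

/* Hypothetical per-condition RX counters, mirroring the pattern above. */
struct rx_stats {
	unsigned long rx_beacons;       /* beacons received */
	unsigned long rx_frags;         /* chained (fragmented) descriptors */
	unsigned long rx_oom_err;       /* skb allocation failures */
	unsigned long rx_drop_rxflush;  /* frames dropped during a queue flush */
};

static struct rx_stats stats;

/* Increment one named counter; the real macro expands against the driver's
 * private stats structure instead of a file-scope variable. */
#define RX_STAT_INC(c) (stats.c++)

int main(void)
{
	RX_STAT_INC(rx_beacons);
	RX_STAT_INC(rx_oom_err);
	printf("beacons=%lu oom=%lu\n", stats.rx_beacons, stats.rx_oom_err);
	return 0;
}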
@@ -114,7 +114,7 @@ __regwrite_out : \

 #define carl9170_regwrite_result() \
 __err; \
-} while (0);
+} while (0)


 #define carl9170_async_regwrite_get_buf() \
@@ -126,7 +126,7 @@ do { \
 __err = -ENOMEM; \
 goto __async_regwrite_out; \
 } \
-} while (0);
+} while (0)

 #define carl9170_async_regwrite_begin(carl) \
 do { \
@@ -169,6 +169,6 @@ __async_regwrite_out: \

 #define carl9170_async_regwrite_result() \
 __err; \
-} while (0);
+} while (0)

 #endif /* __CMD_H */
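The three hunks above drop the trailing semicolon from "} while (0);" in the carl9170 register-write macros. The reason is a C syntax detail: a do/while(0) statement macro must end without its own semicolon so the caller's semicolon closes the statement; with the extra semicolon an if/else around the macro no longer compiles. A minimal standalone illustration (the macro name here is made up):

#include <stdio.h>

/* Correct: no trailing semicolon, the caller supplies it. */
#define LOG_TWICE(msg) do { puts(msg); puts(msg); } while (0)

/* Had the macro ended in "while (0);", the invocation below would expand to
 * an extra empty statement and the "else" branch would fail to compile. */
int main(void)
{
	int ok = 1;

	if (ok)
		LOG_TWICE("ok");
	else
		LOG_TWICE("bad");

	return 0;
}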
@@ -355,6 +355,8 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)

 ar->hw->wiphy->interface_modes |= if_comb_types;

+ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;

 #undef SUPPORTED
 return carl9170_fw_tx_sequence(ar);
 }
@@ -14,6 +14,8 @@
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

 #include <linux/kernel.h>
 #include <linux/module.h>

@@ -49,7 +51,7 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
 if (off != 0)
 skb_reserve(skb, common->cachelsz - off);
 } else {
-printk(KERN_ERR "skbuff alloc of size %u failed\n", len);
+pr_err("skbuff alloc of size %u failed\n", len);
 return NULL;
 }
@@ -14,6 +14,8 @@
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <net/cfg80211.h>
@@ -562,7 +564,7 @@ static int __ath_regd_init(struct ath_regulatory *reg)
 printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd);

 if (!ath_regd_is_eeprom_valid(reg)) {
-printk(KERN_ERR "ath: Invalid EEPROM contents\n");
+pr_err("Invalid EEPROM contents\n");
 return -EINVAL;
 }
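Both ath hunks above replace raw printk(KERN_ERR ...) calls with pr_err() and define pr_fmt before the includes so every pr_* message is automatically prefixed with the module name. A rough standalone sketch of that prefixing idea follows; the kernel's real pr_err() wraps printk, not this printf stand-in.

#include <stdio.h>

/* The prefix every message should carry; in the kernel this is
 * KBUILD_MODNAME ": " and must be defined before the pr_* helpers. */
#define pr_fmt(fmt) "ath: " fmt

/* Simplified stand-in for the kernel's pr_err(). */
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("Invalid EEPROM contents\n");  /* prints "ath: Invalid EEPROM contents" */
	return 0;
}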
@@ -4010,6 +4010,20 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 if (modparam_nohwcrypt)
 return -ENOSPC; /* User disabled HW-crypto */

+if ((vif->type == NL80211_IFTYPE_ADHOC ||
+vif->type == NL80211_IFTYPE_MESH_POINT) &&
+(key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+/*
+* For now, disable hw crypto for the RSN IBSS group keys. This
+* could be optimized in the future, but until that gets
+* implemented, use of software crypto for group addressed
+* frames is a acceptable to allow RSN IBSS to be used.
+*/
+return -EOPNOTSUPP;
+}

 mutex_lock(&wl->mutex);

 dev = wl->current_dev;
@@ -5275,6 +5289,8 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
 BIT(NL80211_IFTYPE_WDS) |
 BIT(NL80211_IFTYPE_ADHOC);

+hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
 wl->mac80211_initially_registered_queues = hw->queues;
 hw->max_rates = 2;
@@ -378,7 +378,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 phy_ctl |= B43_TXH_PHY_SHORTPRMBL;

-switch (b43_ieee80211_antenna_sanitize(dev, info->antenna_sel_tx)) {
+switch (b43_ieee80211_antenna_sanitize(dev, 0)) {
 case 0: /* Default */
 phy_ctl |= B43_TXH_PHY_ANT01AUTO;
 break;
@@ -277,19 +277,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
 phy_ctl |= B43legacy_TX4_PHY_ENC_OFDM;
 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL;
-switch (info->antenna_sel_tx) {
-case 0:
 phy_ctl |= B43legacy_TX4_PHY_ANTLAST;
-break;
-case 1:
-phy_ctl |= B43legacy_TX4_PHY_ANT0;
-break;
-case 2:
-phy_ctl |= B43legacy_TX4_PHY_ANT1;
-break;
-default:
-B43legacy_BUG_ON(1);
-}

 /* MAC control */
 rates = info->control.rates;
@@ -733,7 +733,7 @@ struct cck_phy_hdr {
 do { \
 plcp[1] = len & 0xff; \
 plcp[2] = ((len >> 8) & 0xff); \
-} while (0);
+} while (0)

 #define BRCMS_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU)
 #define BRCMS_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU)
@@ -135,15 +135,6 @@ enum {
 IPW_HW_STATE_ENABLED = 0
 };

-struct ssid_context {
-char ssid[IW_ESSID_MAX_SIZE + 1];
-int ssid_len;
-unsigned char bssid[ETH_ALEN];
-int port_type;
-int channel;
-
-};

 extern const char *port_type_str[];
 extern const char *band_str[];
@@ -584,61 +584,6 @@ struct libipw_tim_parameters {

 /*******************************************************/

-enum { /* libipw_basic_report.map */
-LIBIPW_BASIC_MAP_BSS = (1 << 0),
-LIBIPW_BASIC_MAP_OFDM = (1 << 1),
-LIBIPW_BASIC_MAP_UNIDENTIFIED = (1 << 2),
-LIBIPW_BASIC_MAP_RADAR = (1 << 3),
-LIBIPW_BASIC_MAP_UNMEASURED = (1 << 4),
-/* Bits 5-7 are reserved */
-
-};
-struct libipw_basic_report {
-u8 channel;
-__le64 start_time;
-__le16 duration;
-u8 map;
-} __packed;
-
-enum { /* libipw_measurement_request.mode */
-/* Bit 0 is reserved */
-LIBIPW_MEASUREMENT_ENABLE = (1 << 1),
-LIBIPW_MEASUREMENT_REQUEST = (1 << 2),
-LIBIPW_MEASUREMENT_REPORT = (1 << 3),
-/* Bits 4-7 are reserved */
-};
-
-enum {
-LIBIPW_REPORT_BASIC = 0, /* required */
-LIBIPW_REPORT_CCA = 1, /* optional */
-LIBIPW_REPORT_RPI = 2, /* optional */
-/* 3-255 reserved */
-};
-
-struct libipw_measurement_params {
-u8 channel;
-__le64 start_time;
-__le16 duration;
-} __packed;
-
-struct libipw_measurement_request {
-struct libipw_info_element ie;
-u8 token;
-u8 mode;
-u8 type;
-struct libipw_measurement_params params[0];
-} __packed;
-
-struct libipw_measurement_report {
-struct libipw_info_element ie;
-u8 token;
-u8 mode;
-u8 type;
-union {
-struct libipw_basic_report basic[0];
-} u;
-} __packed;
-
 struct libipw_tpc_report {
 u8 transmit_power;
 u8 link_margin;
@@ -2850,9 +2850,9 @@ void
 il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
 struct ieee80211_tx_info *info)
 {
-struct ieee80211_tx_rate *r = &info->control.rates[0];
+struct ieee80211_tx_rate *r = &info->status.rates[0];

-info->antenna_sel_tx =
+info->status.antenna =
 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
 if (rate_n_flags & RATE_MCS_HT_MSK)
 r->flags |= IEEE80211_TX_RC_MCS;
@@ -873,7 +873,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
 tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
 tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
 tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
-tbl_type.ant_type != info->antenna_sel_tx ||
+tbl_type.ant_type != info->status.antenna ||
 !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
 || !!(tx_rate & RATE_MCS_GF_MSK) !=
 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
@@ -136,3 +136,11 @@ config IWLWIFI_EXPERIMENTAL_MFP
 even if the microcode doesn't advertise it.

 Say Y only if you want to experiment with MFP.
+
+config IWLWIFI_UCODE16
+bool "support uCode 16.0"
+depends on IWLWIFI
+help
+This option enables support for uCode version 16.0.
+
+Say Y if you want to use 16.0 microcode.
@@ -17,6 +17,8 @@ iwlwifi-objs += iwl-drv.o
 iwlwifi-objs += iwl-notif-wait.o
 iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o

+
+iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
@@ -157,7 +157,6 @@ static struct iwl_lib_ops iwl1000_lib = {

 static const struct iwl_base_params iwl1000_base_params = {
 .num_of_queues = IWLAGN_NUM_QUEUES,
-.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 .eeprom_size = OTP_LOW_IMAGE_SIZE,
 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
@@ -86,7 +86,6 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
 {
 iwl_rf_config(priv);

-if (cfg(priv)->iq_invert)
 iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
 }
@@ -172,7 +171,6 @@ static struct iwl_lib_ops iwl2030_lib = {
 static const struct iwl_base_params iwl2000_base_params = {
 .eeprom_size = OTP_LOW_IMAGE_SIZE,
 .num_of_queues = IWLAGN_NUM_QUEUES,
-.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 .pll_cfg_val = 0,
 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
 .shadow_ram_support = true,
@@ -191,7 +189,6 @@ static const struct iwl_base_params iwl2000_base_params = {
 static const struct iwl_base_params iwl2030_base_params = {
 .eeprom_size = OTP_LOW_IMAGE_SIZE,
 .num_of_queues = IWLAGN_NUM_QUEUES,
-.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 .pll_cfg_val = 0,
 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
 .shadow_ram_support = true,
@@ -234,8 +231,7 @@ static const struct iwl_bt_params iwl2030_bt_params = {
 .base_params = &iwl2000_base_params, \
 .need_temp_offset_calib = true, \
 .temp_offset_v2 = true, \
-.led_mode = IWL_LED_RF_STATE, \
+.led_mode = IWL_LED_RF_STATE
-.iq_invert = true \

 const struct iwl_cfg iwl2000_2bgn_cfg = {
 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
@@ -264,8 +260,7 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
 .need_temp_offset_calib = true, \
 .temp_offset_v2 = true, \
 .led_mode = IWL_LED_RF_STATE, \
-.adv_pm = true, \
+.adv_pm = true
-.iq_invert = true \

 const struct iwl_cfg iwl2030_2bgn_cfg = {
 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -288,8 +283,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
 .temp_offset_v2 = true, \
 .led_mode = IWL_LED_RF_STATE, \
 .adv_pm = true, \
-.rx_with_siso_diversity = true, \
+.rx_with_siso_diversity = true
-.iq_invert = true \

 const struct iwl_cfg iwl105_bgn_cfg = {
 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -319,8 +313,7 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
 .temp_offset_v2 = true, \
 .led_mode = IWL_LED_RF_STATE, \
 .adv_pm = true, \
-.rx_with_siso_diversity = true, \
+.rx_with_siso_diversity = true
-.iq_invert = true \

 const struct iwl_cfg iwl135_bgn_cfg = {
 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
@@ -308,7 +308,6 @@ static struct iwl_lib_ops iwl5150_lib = {
 static const struct iwl_base_params iwl5000_base_params = {
 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
 .num_of_queues = IWLAGN_NUM_QUEUES,
-.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
 .led_compensation = 51,
 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
@@ -269,7 +269,6 @@ static struct iwl_lib_ops iwl6030_lib = {
 static const struct iwl_base_params iwl6000_base_params = {
 .eeprom_size = OTP_LOW_IMAGE_SIZE,
 .num_of_queues = IWLAGN_NUM_QUEUES,
-.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 .pll_cfg_val = 0,
 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 .shadow_ram_support = true,
@@ -286,7 +285,6 @@ static const struct iwl_base_params iwl6000_base_params = {
 static const struct iwl_base_params iwl6050_base_params = {
 .eeprom_size = OTP_LOW_IMAGE_SIZE,
 .num_of_queues = IWLAGN_NUM_QUEUES,
-.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 .pll_cfg_val = 0,
 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
 .shadow_ram_support = true,
@@ -303,7 +301,6 @@ static const struct iwl_base_params iwl6050_base_params = {
 static const struct iwl_base_params iwl6000_g2_base_params = {
 .eeprom_size = OTP_LOW_IMAGE_SIZE,
 .num_of_queues = IWLAGN_NUM_QUEUES,
-.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 .pll_cfg_val = 0,
 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 .shadow_ram_support = true,
@@ -103,9 +103,6 @@
 /* EEPROM */
 #define IWLAGN_EEPROM_IMG_SIZE 2048

-#define IWLAGN_CMD_FIFO_NUM 7
 #define IWLAGN_NUM_QUEUES 20
-#define IWLAGN_NUM_AMPDU_QUEUES 9
-#define IWLAGN_FIRST_AMPDU_QUEUE 11

 #endif /* __iwl_agn_hw_h__ */
@@ -228,7 +228,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
 IWL_SCD_MGMT_MSK;
 if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
-(priv->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
+(priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
 flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
 IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
 IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
@@ -615,7 +615,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
 struct iwl_bt_uart_msg *uart_msg)
 {
 IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
-"Update Req = 0x%X",
+"Update Req = 0x%X\n",
 (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
 BT_UART_MSG_FRAME1MSGTYPE_POS,
 (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
@@ -624,7 +624,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
 BT_UART_MSG_FRAME1UPDATEREQ_POS);

 IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
-"Chl_SeqN = 0x%X, In band = 0x%X",
+"Chl_SeqN = 0x%X, In band = 0x%X\n",
 (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
 BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
 (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
@@ -635,7 +635,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
 BT_UART_MSG_FRAME2INBAND_POS);

 IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
-"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
+"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X\n",
 (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
 BT_UART_MSG_FRAME3SCOESCO_POS,
 (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
@@ -649,12 +649,12 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
 (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
 BT_UART_MSG_FRAME3OBEX_POS);

-IWL_DEBUG_COEX(priv, "Idle duration = 0x%X",
+IWL_DEBUG_COEX(priv, "Idle duration = 0x%X\n",
 (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
 BT_UART_MSG_FRAME4IDLEDURATION_POS);

 IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
-"eSCO Retransmissions = 0x%X",
+"eSCO Retransmissions = 0x%X\n",
 (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
 BT_UART_MSG_FRAME5TXACTIVITY_POS,
 (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
@@ -662,14 +662,14 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
 (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
 BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);

-IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
+IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X\n",
 (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
 BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
 BT_UART_MSG_FRAME6DISCOVERABLE_POS);

 IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
-"0x%X, Inquiry = 0x%X, Connectable = 0x%X",
+"0x%X, Inquiry = 0x%X, Connectable = 0x%X\n",
 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
 (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
@@ -856,7 +856,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
 void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
 bool is_single = is_single_rx_stream(priv);
-bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->shrd->status);
+bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
 u32 active_chains;
 u16 rx_chain;
@@ -1298,6 +1298,12 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 return -EIO;
 }

+if (test_bit(STATUS_FW_ERROR, &priv->status)) {
+IWL_ERR(priv, "Command %s failed: FW Error\n",
+get_cmd_string(cmd->id));
+return -EIO;
+}

 /*
 * Synchronous commands from this op-mode must hold
 * the mutex, this ensures we don't try to send two
@@ -969,7 +969,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
 (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
 (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
-(tbl_type.ant_type != info->antenna_sel_tx) ||
+(tbl_type.ant_type != info->status.antenna) ||
 (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
 (rs_index != mac_index)) {
@@ -2166,7 +2166,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
 (lq_sta->total_success > lq_sta->max_success_limit) ||
 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
 && (flush_interval_passed))) {
-IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
+IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
 lq_sta->total_failed,
 lq_sta->total_success,
 flush_interval_passed);
@@ -794,7 +794,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
 return;
 }

-offset = (void *)hdr - rxb_addr(rxb);
+offset = (void *)hdr - rxb_addr(rxb) + rxb_offset(rxb);
 p = rxb_steal_page(rxb);
 skb_add_rx_frag(skb, 0, p, offset, len, len);

@@ -970,7 +970,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
 }

 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
-IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
+IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
 phy_res->cfg_phy_cnt);
 return 0;
 }
@@ -1134,9 +1134,6 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
 handlers[REPLY_COMPRESSED_BA] =
 iwlagn_rx_reply_compressed_ba;

-/* init calibration handlers */
-priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
-iwlagn_rx_calib_result;
 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;

 /* set up notification wait support */
@@ -24,6 +24,7 @@
 *
 *****************************************************************************/

+#include <linux/etherdevice.h>
 #include "iwl-dev.h"
 #include "iwl-agn.h"
 #include "iwl-core.h"
@@ -59,9 +60,12 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
 __le32 old_filter = send->filter_flags;
 u8 old_dev_type = send->dev_type;
 int ret;
+static const u8 deactivate_cmd[] = {
+REPLY_WIPAN_DEACTIVATION_COMPLETE
+};

 iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
-REPLY_WIPAN_DEACTIVATION_COMPLETE,
+deactivate_cmd, ARRAY_SIZE(deactivate_cmd),
 NULL, NULL);

 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -186,6 +190,109 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
 return ret;
 }

+static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+u16 new_val;
+u16 beacon_factor;
+
+/*
+* If mac80211 hasn't given us a beacon interval, program
+* the default into the device (not checking this here
+* would cause the adjustment below to return the maximum
+* value, which may break PAN.)
+*/
+if (!beacon_val)
+return DEFAULT_BEACON_INTERVAL;
+
+/*
+* If the beacon interval we obtained from the peer
+* is too large, we'll have to wake up more often
+* (and in IBSS case, we'll beacon too much)
+*
+* For example, if max_beacon_val is 4096, and the
+* requested beacon interval is 7000, we'll have to
+* use 3500 to be able to wake up on the beacons.
+*
+* This could badly influence beacon detection stats.
+*/
+
+beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+new_val = beacon_val / beacon_factor;
+
+if (!new_val)
+new_val = max_beacon_val;
+
+return new_val;
+}
+
+static int iwl_send_rxon_timing(struct iwl_priv *priv,
+struct iwl_rxon_context *ctx)
+{
+u64 tsf;
+s32 interval_tm, rem;
+struct ieee80211_conf *conf = NULL;
+u16 beacon_int;
+struct ieee80211_vif *vif = ctx->vif;
+
+conf = &priv->hw->conf;
+
+lockdep_assert_held(&priv->mutex);
+
+memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
+
+ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
+ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+/*
+* TODO: For IBSS we need to get atim_window from mac80211,
+* for now just always use 0
+*/
+ctx->timing.atim_window = 0;
+
+if (ctx->ctxid == IWL_RXON_CTX_PAN &&
+(!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
+iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
+priv->contexts[IWL_RXON_CTX_BSS].vif &&
+priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
+ctx->timing.beacon_interval =
+priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
+beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
+} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
+iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
+priv->contexts[IWL_RXON_CTX_PAN].vif &&
+priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
+(!iwl_is_associated_ctx(ctx) || !ctx->vif ||
+!ctx->vif->bss_conf.beacon_int)) {
+ctx->timing.beacon_interval =
+priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
+beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
+} else {
+beacon_int = iwl_adjust_beacon_interval(beacon_int,
+IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
+ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+}
+
+ctx->beacon_int = beacon_int;
+
+tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
+interval_tm = beacon_int * TIME_UNIT;
+rem = do_div(tsf, interval_tm);
+ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
+
+IWL_DEBUG_ASSOC(priv,
+"beacon interval %d beacon timer %d beacon tim %d\n",
+le16_to_cpu(ctx->timing.beacon_interval),
+le32_to_cpu(ctx->timing.beacon_init_val),
+le16_to_cpu(ctx->timing.atim_window));
+
+return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
+CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
+}
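The iwl_adjust_beacon_interval() helper added above divides an oversized beacon interval down so the device can still wake on beacons. A small standalone rendering of that arithmetic follows, using the 4096/7000 case from the code comment; DEFAULT_BEACON_INTERVAL here is only an illustrative stand-in for the driver's constant.

#include <stdio.h>

#define DEFAULT_BEACON_INTERVAL 200  /* illustrative stand-in value */

static unsigned short adjust_beacon_interval(unsigned short beacon_val,
					     unsigned short max_beacon_val)
{
	unsigned short beacon_factor, new_val;

	if (!beacon_val)
		return DEFAULT_BEACON_INTERVAL;

	/* Smallest integer factor that brings the interval under the max. */
	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}

int main(void)
{
	/* 7000 exceeds a 4096 maximum: factor is 2, so the device wakes
	 * every 3500 TU and still lands on every second beacon. */
	printf("%u\n", adjust_beacon_interval(7000, 4096));
	return 0;
}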
 static int iwlagn_rxon_disconn(struct iwl_priv *priv,
 struct iwl_rxon_context *ctx)
 {
@@ -309,7 +416,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
 int slot0 = 300, slot1 = 0;
 int ret;

-if (priv->shrd->valid_contexts == BIT(IWL_RXON_CTX_BSS))
+if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
 return 0;

 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
@@ -394,6 +501,154 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
 return ret;
 }

+static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv,
+struct iwl_rxon_context *ctx, int hw_decrypt)
+{
+struct iwl_rxon_cmd *rxon = &ctx->staging;
+
+if (hw_decrypt)
+rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+else
+rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+
+/* validate RXON structure is valid */
+static int iwl_check_rxon_cmd(struct iwl_priv *priv,
+struct iwl_rxon_context *ctx)
+{
+struct iwl_rxon_cmd *rxon = &ctx->staging;
+u32 errors = 0;
+
+if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+IWL_WARN(priv, "check 2.4G: wrong narrow\n");
+errors |= BIT(0);
+}
+if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+IWL_WARN(priv, "check 2.4G: wrong radar\n");
+errors |= BIT(1);
+}
+} else {
+if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+IWL_WARN(priv, "check 5.2G: not short slot!\n");
+errors |= BIT(2);
+}
+if (rxon->flags & RXON_FLG_CCK_MSK) {
+IWL_WARN(priv, "check 5.2G: CCK!\n");
+errors |= BIT(3);
+}
+}
+if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+IWL_WARN(priv, "mac/bssid mcast!\n");
+errors |= BIT(4);
+}
+
+/* make sure basic rates 6Mbps and 1Mbps are supported */
+if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
+(rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
+IWL_WARN(priv, "neither 1 nor 6 are basic\n");
+errors |= BIT(5);
+}
+
+if (le16_to_cpu(rxon->assoc_id) > 2007) {
+IWL_WARN(priv, "aid > 2007\n");
+errors |= BIT(6);
+}
+
+if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
+== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+IWL_WARN(priv, "CCK and short slot\n");
+errors |= BIT(7);
+}
+
+if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
+== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+IWL_WARN(priv, "CCK and auto detect");
+errors |= BIT(8);
+}
+
+if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
+RXON_FLG_TGG_PROTECT_MSK)) ==
+RXON_FLG_TGG_PROTECT_MSK) {
+IWL_WARN(priv, "TGg but no auto-detect\n");
+errors |= BIT(9);
+}
+
+if (rxon->channel == 0) {
+IWL_WARN(priv, "zero channel is invalid\n");
+errors |= BIT(10);
+}
+
+WARN(errors, "Invalid RXON (%#x), channel %d",
+errors, le16_to_cpu(rxon->channel));
+
+return errors ? -EINVAL : 0;
+}
+
+/**
+* iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+* @priv: staging_rxon is compared to active_rxon
+*
+* If the RXON structure is changing enough to require a new tune,
+* or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+* a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+*/
+static int iwl_full_rxon_required(struct iwl_priv *priv,
+struct iwl_rxon_context *ctx)
+{
+const struct iwl_rxon_cmd *staging = &ctx->staging;
+const struct iwl_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond) \
+if ((cond)) { \
+IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
+return 1; \
+}
+
+#define CHK_NEQ(c1, c2) \
+if ((c1) != (c2)) { \
+IWL_DEBUG_INFO(priv, "need full RXON - " \
+#c1 " != " #c2 " - %d != %d\n", \
+(c1), (c2)); \
+return 1; \
+}
+
+/* These items are only settable from the full RXON command */
+CHK(!iwl_is_associated_ctx(ctx));
+CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+CHK(compare_ether_addr(staging->wlap_bssid_addr,
+active->wlap_bssid_addr));
+CHK_NEQ(staging->dev_type, active->dev_type);
+CHK_NEQ(staging->channel, active->channel);
+CHK_NEQ(staging->air_propagation, active->air_propagation);
+CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+active->ofdm_ht_single_stream_basic_rates);
+CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+active->ofdm_ht_dual_stream_basic_rates);
+CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
+active->ofdm_ht_triple_stream_basic_rates);
+CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+* be updated with the RXON_ASSOC command -- however only some
+* flag transitions are allowed using RXON_ASSOC */
+
+/* Check if we are not switching bands */
+CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+active->flags & RXON_FLG_BAND_24G_MSK);
+
+/* Check if we are switching association toggle */
+CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+return 0;
+}
+
 /**
 * iwlagn_commit_rxon - commit staging_rxon to hardware
 *
@@ -547,7 +802,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
 const struct iwl_channel_info *ch_info;
 int ret = 0;

-IWL_DEBUG_MAC80211(priv, "enter: changed %#x", changed);
+IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);

 mutex_lock(&priv->mutex);
@@ -40,6 +40,17 @@
 #include "iwl-agn.h"
 #include "iwl-trans.h"

+static const u8 tid_to_ac[] = {
+IEEE80211_AC_BE,
+IEEE80211_AC_BK,
+IEEE80211_AC_BK,
+IEEE80211_AC_BE,
+IEEE80211_AC_VI,
+IEEE80211_AC_VI,
+IEEE80211_AC_VO,
+IEEE80211_AC_VO,
+};
+
 static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
 struct ieee80211_tx_info *info,
 __le16 fc, __le32 *tx_flags)
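The tid_to_ac[] table added above maps the eight 802.11 TIDs onto the four access categories so the driver can pick a transmit FIFO and aggregation queue per AC. A standalone sketch of the same lookup shape follows; the enum values are illustrative, with mac80211's IEEE80211_AC_* constants playing this role in the real code.

#include <stdio.h>

/* Illustrative AC identifiers. */
enum { AC_VO, AC_VI, AC_BE, AC_BK };

/* TID 0..7 -> access category, same shape as the table in the hunk above. */
static const int tid_to_ac[] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};

int main(void)
{
	int tid;

	for (tid = 0; tid < 8; tid++)
		printf("TID %d -> AC %d\n", tid, tid_to_ac[tid]);
	return 0;
}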
@ -293,6 +304,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
|
|||||||
u16 len, seq_number = 0;
|
u16 len, seq_number = 0;
|
||||||
u8 sta_id, tid = IWL_MAX_TID_COUNT;
|
u8 sta_id, tid = IWL_MAX_TID_COUNT;
|
||||||
bool is_agg = false;
|
bool is_agg = false;
|
||||||
|
int txq_id;
|
||||||
|
|
||||||
if (info->control.vif)
|
if (info->control.vif)
|
||||||
ctx = iwl_rxon_ctx_from_vif(info->control.vif);
|
ctx = iwl_rxon_ctx_from_vif(info->control.vif);
|
||||||
@ -435,7 +447,27 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
|
|||||||
/* Copy MAC header from skb into command buffer */
|
/* Copy MAC header from skb into command buffer */
|
||||||
memcpy(tx_cmd->hdr, hdr, hdr_len);
|
memcpy(tx_cmd->hdr, hdr, hdr_len);
|
||||||
|
|
||||||
if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid))
|
if (is_agg)
|
||||||
|
txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
|
||||||
|
else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
|
||||||
|
/*
|
||||||
|
* Send this frame after DTIM -- there's a special queue
|
||||||
|
* reserved for this for contexts that support AP mode.
|
||||||
|
*/
|
||||||
|
txq_id = ctx->mcast_queue;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The microcode will clear the more data
|
||||||
|
* bit in the last frame it transmits.
|
||||||
|
*/
|
||||||
|
hdr->frame_control |=
|
||||||
|
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
|
||||||
|
} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
|
||||||
|
txq_id = IWL_AUX_QUEUE;
|
||||||
|
else
|
||||||
|
txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
|
||||||
|
|
||||||
|
if (iwl_trans_tx(trans(priv), skb, dev_cmd, txq_id))
|
||||||
goto drop_unlock_sta;
|
goto drop_unlock_sta;
|
||||||
|
|
||||||
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
|
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
|
||||||
@ -464,11 +496,32 @@ drop_unlock_priv:
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int ac)
|
||||||
|
{
|
||||||
|
int q;
|
||||||
|
|
||||||
|
for (q = IWLAGN_FIRST_AMPDU_QUEUE;
|
||||||
|
q < cfg(priv)->base_params->num_of_queues; q++) {
|
||||||
|
if (!test_and_set_bit(q, priv->agg_q_alloc)) {
|
||||||
|
priv->queue_to_ac[q] = ac;
|
||||||
|
return q;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return -ENOSPC;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
|
||||||
|
{
|
||||||
|
clear_bit(q, priv->agg_q_alloc);
|
||||||
|
priv->queue_to_ac[q] = IWL_INVALID_AC;
|
||||||
|
}
|
||||||
|
|
||||||
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
||||||
struct ieee80211_sta *sta, u16 tid)
|
struct ieee80211_sta *sta, u16 tid)
|
||||||
{
|
{
|
||||||
struct iwl_tid_data *tid_data;
|
struct iwl_tid_data *tid_data;
|
||||||
int sta_id;
|
int sta_id, txq_id;
|
||||||
|
|
||||||
sta_id = iwl_sta_id(sta);
|
sta_id = iwl_sta_id(sta);
|
||||||
|
|
||||||
@ -480,6 +533,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
|||||||
spin_lock_bh(&priv->sta_lock);
|
spin_lock_bh(&priv->sta_lock);
|
||||||
|
|
||||||
tid_data = &priv->tid_data[sta_id][tid];
|
tid_data = &priv->tid_data[sta_id][tid];
|
||||||
|
txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
|
||||||
|
|
||||||
switch (priv->tid_data[sta_id][tid].agg.state) {
|
switch (priv->tid_data[sta_id][tid].agg.state) {
|
||||||
case IWL_EMPTYING_HW_QUEUE_ADDBA:
|
case IWL_EMPTYING_HW_QUEUE_ADDBA:
|
||||||
@ -504,9 +558,13 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
|||||||
tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
|
tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
|
||||||
|
|
||||||
/* There are still packets for this RA / TID in the HW */
|
/* There are still packets for this RA / TID in the HW */
|
||||||
if (tid_data->agg.ssn != tid_data->next_reclaimed) {
|
if (!test_bit(txq_id, priv->agg_q_alloc)) {
|
||||||
|
IWL_DEBUG_TX_QUEUES(priv,
|
||||||
|
"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
|
||||||
|
sta_id, tid, txq_id);
|
||||||
|
} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
|
||||||
IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
|
IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
|
||||||
"next_recl = %d",
|
"next_recl = %d\n",
|
||||||
tid_data->agg.ssn,
|
tid_data->agg.ssn,
|
||||||
tid_data->next_reclaimed);
|
tid_data->next_reclaimed);
|
||||||
priv->tid_data[sta_id][tid].agg.state =
|
priv->tid_data[sta_id][tid].agg.state =
|
||||||
@ -515,14 +573,17 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
|
IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
|
||||||
tid_data->agg.ssn);
|
tid_data->agg.ssn);
|
||||||
turn_off:
|
turn_off:
|
||||||
priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
|
priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
|
||||||
|
|
||||||
spin_unlock_bh(&priv->sta_lock);
|
spin_unlock_bh(&priv->sta_lock);
|
||||||
|
|
||||||
iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
|
if (test_bit(txq_id, priv->agg_q_alloc)) {
|
||||||
|
iwl_trans_tx_agg_disable(trans(priv), txq_id);
|
||||||
|
iwlagn_dealloc_agg_txq(priv, txq_id);
|
||||||
|
}
|
||||||
|
|
||||||
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
||||||
|
|
||||||
@ -533,8 +594,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
|||||||
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
|
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
|
||||||
{
|
{
|
||||||
struct iwl_tid_data *tid_data;
|
struct iwl_tid_data *tid_data;
|
||||||
int sta_id;
|
int sta_id, txq_id, ret;
|
||||||
int ret;
|
|
||||||
|
|
||||||
IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
|
IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
|
||||||
sta->addr, tid);
|
sta->addr, tid);
|
||||||
@ -552,36 +612,37 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
|||||||
return -ENXIO;
|
return -ENXIO;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
txq_id = iwlagn_alloc_agg_txq(priv, tid_to_ac[tid]);
|
||||||
|
if (txq_id < 0) {
|
||||||
|
IWL_DEBUG_TX_QUEUES(priv,
|
||||||
|
"No free aggregation queue for %pM/%d\n",
|
||||||
|
sta->addr, tid);
|
||||||
|
return txq_id;
|
||||||
|
}
|
||||||
|
|
||||||
ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
|
ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
spin_lock_bh(&priv->sta_lock);
|
spin_lock_bh(&priv->sta_lock);
|
||||||
|
|
||||||
tid_data = &priv->tid_data[sta_id][tid];
|
tid_data = &priv->tid_data[sta_id][tid];
|
||||||
tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
|
tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
|
||||||
|
tid_data->agg.txq_id = txq_id;
|
||||||
|
|
||||||
*ssn = tid_data->agg.ssn;
|
*ssn = tid_data->agg.ssn;
|
||||||
|
|
||||||
ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
|
|
||||||
if (ret) {
|
|
||||||
spin_unlock_bh(&priv->sta_lock);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (*ssn == tid_data->next_reclaimed) {
|
if (*ssn == tid_data->next_reclaimed) {
|
||||||
IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
|
IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
|
||||||
tid_data->agg.ssn);
|
tid_data->agg.ssn);
|
||||||
tid_data->agg.state = IWL_AGG_ON;
|
tid_data->agg.state = IWL_AGG_ON;
|
||||||
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
||||||
} else {
|
} else {
|
||||||
IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
|
IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
|
||||||
"next_reclaimed = %d",
|
"next_reclaimed = %d\n",
|
||||||
tid_data->agg.ssn,
|
tid_data->agg.ssn,
|
||||||
tid_data->next_reclaimed);
|
tid_data->next_reclaimed);
|
||||||
tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
|
tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock_bh(&priv->sta_lock);
|
spin_unlock_bh(&priv->sta_lock);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
@ -592,15 +653,20 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
|||||||
{
|
{
|
||||||
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
|
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
|
||||||
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
|
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
|
||||||
|
int q, fifo;
|
||||||
u16 ssn;
|
u16 ssn;
|
||||||
|
|
||||||
buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
|
 	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
 
 	spin_lock_bh(&priv->sta_lock);
 	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
+	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
 	spin_unlock_bh(&priv->sta_lock);
 
-	iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid,
+	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
+
+	iwl_trans_tx_agg_setup(trans(priv), q, fifo,
+			       sta_priv->sta_id, tid,
 			       buf_size, ssn);
 
 	/*
@@ -666,7 +732,9 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
 			IWL_DEBUG_TX_QUEUES(priv,
 				"Can continue DELBA flow ssn = next_recl ="
 				" %d", tid_data->next_reclaimed);
-			iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+			iwl_trans_tx_agg_disable(trans(priv),
+						 tid_data->agg.txq_id);
+			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
 			tid_data->agg.state = IWL_AGG_OFF;
 			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
 		}
@@ -711,9 +779,9 @@ static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
 static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
 					struct ieee80211_tx_info *info)
 {
-	struct ieee80211_tx_rate *r = &info->control.rates[0];
+	struct ieee80211_tx_rate *r = &info->status.rates[0];
 
-	info->antenna_sel_tx =
+	info->status.antenna =
 		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
 	if (rate_n_flags & RATE_MCS_HT_MSK)
 		r->flags |= IEEE80211_TX_RC_MCS;
@@ -1005,6 +1073,29 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
 	}
 }
 
+static int iwl_reclaim(struct iwl_priv *priv, int sta_id, int tid,
+		       int txq_id, int ssn, struct sk_buff_head *skbs)
+{
+	if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
+		     tid != IWL_TID_NON_QOS &&
+		     txq_id != priv->tid_data[sta_id][tid].agg.txq_id)) {
+		/*
+		 * FIXME: this is a uCode bug which need to be addressed,
+		 * log the information and return for now.
+		 * Since it is can possibly happen very often and in order
+		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
+		 */
+		IWL_DEBUG_TX_QUEUES(priv,
+			"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
+			txq_id, sta_id, tid,
+			priv->tid_data[sta_id][tid].agg.txq_id);
+		return 1;
+	}
+
+	iwl_trans_reclaim(trans(priv), txq_id, ssn, skbs);
+	return 0;
+}
+
 int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 			struct iwl_device_cmd *cmd)
 {
@@ -1059,13 +1150,12 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 		if (tid != IWL_TID_NON_QOS) {
 			priv->tid_data[sta_id][tid].next_reclaimed =
 				next_reclaimed;
-			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d",
+			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
 					   next_reclaimed);
 		}
 
 		/*we can free until ssn % q.n_bd not inclusive */
-		WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid,
-					  txq_id, ssn, &skbs));
+		WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs));
 		iwlagn_check_ratid_empty(priv, sta_id, tid);
 		freed = 0;
 
@@ -1183,7 +1273,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
 	/* Release all TFDs before the SSN, i.e. all TFDs in front of
 	 * block-ack window (we assume that they've been successfully
 	 * transmitted ... if not, it's too late anyway). */
-	if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow,
-			      ba_resp_scd_ssn, &reclaimed_skbs)) {
+	if (iwl_reclaim(priv, sta_id, tid, scd_flow,
+			ba_resp_scd_ssn, &reclaimed_skbs)) {
 		spin_unlock(&priv->sta_lock);
 		return 0;
@@ -26,6 +26,9 @@
  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  *
  *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -379,7 +382,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
 	u32 num_wraps;  /* # times uCode wrapped to top of log */
 	u32 next_entry; /* index of next entry to be written by uCode */
 
-	base = priv->shrd->device_pointers.log_event_table;
+	base = priv->device_pointers.log_event_table;
 	if (iwlagn_hw_valid_rtc_data_addr(base)) {
 		iwl_read_targ_mem_words(trans(priv), base, &read, sizeof(read));
 
@@ -488,6 +491,93 @@ static void iwl_bg_tx_flush(struct work_struct *work)
 	iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
 }
 
+/*
+ * queue/FIFO/AC mapping definitions
+ */
+
+#define IWL_TX_FIFO_BK		0	/* shared */
+#define IWL_TX_FIFO_BE		1
+#define IWL_TX_FIFO_VI		2	/* shared */
+#define IWL_TX_FIFO_VO		3
+#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
+#define IWL_TX_FIFO_BE_IPAN	4
+#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
+#define IWL_TX_FIFO_VO_IPAN	5
+/* re-uses the VO FIFO, uCode will properly flush/schedule */
+#define IWL_TX_FIFO_AUX		5
+#define IWL_TX_FIFO_UNUSED	-1
+
+#define IWLAGN_CMD_FIFO_NUM	7
+
+/*
+ * This queue number is required for proper operation
+ * because the ucode will stop/start the scheduler as
+ * required.
+ */
+#define IWL_IPAN_MCAST_QUEUE	8
+
+static const u8 iwlagn_default_queue_to_tx_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+	IWLAGN_CMD_FIFO_NUM,
+};
+
+static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+	IWL_TX_FIFO_BK_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWL_TX_FIFO_VI_IPAN,
+	IWL_TX_FIFO_VO_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWLAGN_CMD_FIFO_NUM,
+	IWL_TX_FIFO_AUX,
+};
+
+static const u8 iwlagn_bss_ac_to_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+};
+
+static const u8 iwlagn_bss_ac_to_queue[] = {
+	0, 1, 2, 3,
+};
+
+static const u8 iwlagn_pan_ac_to_fifo[] = {
+	IWL_TX_FIFO_VO_IPAN,
+	IWL_TX_FIFO_VI_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWL_TX_FIFO_BK_IPAN,
+};
+
+static const u8 iwlagn_pan_ac_to_queue[] = {
+	7, 6, 5, 4,
+};
+
+static const u8 iwlagn_bss_queue_to_ac[] = {
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BK,
+};
+
+static const u8 iwlagn_pan_queue_to_ac[] = {
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VO,
+};
+
 static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
 {
 	int i;
@@ -496,9 +586,9 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
 	 * The default context is always valid,
 	 * the PAN context depends on uCode.
 	 */
-	priv->shrd->valid_contexts = BIT(IWL_RXON_CTX_BSS);
+	priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
 	if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN)
-		priv->shrd->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
+		priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
 
 	for (i = 0; i < NUM_IWL_RXON_CTX; i++)
 		priv->contexts[i].ctxid = i;
@@ -520,6 +610,10 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
 	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
 	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
 	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
+	memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue,
+	       iwlagn_bss_ac_to_queue, sizeof(iwlagn_bss_ac_to_queue));
+	memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo,
+	       iwlagn_bss_ac_to_fifo, sizeof(iwlagn_bss_ac_to_fifo));
 
 	priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
 	priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
@@ -542,6 +636,11 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
 	priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
 	priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
 	priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
+	memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue,
+	       iwlagn_pan_ac_to_queue, sizeof(iwlagn_pan_ac_to_queue));
+	memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo,
+	       iwlagn_pan_ac_to_fifo, sizeof(iwlagn_pan_ac_to_fifo));
+	priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
 
 	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
 }
@@ -824,11 +923,10 @@ void iwl_down(struct iwl_priv *priv)
 			STATUS_RF_KILL_HW |
 		       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
 			STATUS_GEO_CONFIGURED |
+		       test_bit(STATUS_FW_ERROR, &priv->status) <<
+			STATUS_FW_ERROR |
 		       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
 			STATUS_EXIT_PENDING;
-	priv->shrd->status &=
-			test_bit(STATUS_FW_ERROR, &priv->shrd->status) <<
-				STATUS_FW_ERROR;
 
 	dev_kfree_skb(priv->beacon_skb);
 	priv->beacon_skb = NULL;
@@ -869,6 +967,7 @@ void iwlagn_prepare_restart(struct iwl_priv *priv)
 	u8 bt_load;
 	u8 bt_status;
 	bool bt_is_sco;
+	int i;
 
 	lockdep_assert_held(&priv->mutex);
 
@@ -898,6 +997,15 @@ void iwlagn_prepare_restart(struct iwl_priv *priv)
 	priv->bt_traffic_load = bt_load;
 	priv->bt_status = bt_status;
 	priv->bt_is_sco = bt_is_sco;
+
+	/* reset all queues */
+	for (i = 0; i < IEEE80211_NUM_ACS; i++)
+		atomic_set(&priv->ac_stop_count[i], 0);
+
+	for (i = IWLAGN_FIRST_AMPDU_QUEUE; i < IWL_MAX_HW_QUEUES; i++)
+		priv->queue_to_ac[i] = IWL_INVALID_AC;
+
+	memset(priv->agg_q_alloc, 0, sizeof(priv->agg_q_alloc));
 }
 
 static void iwl_bg_restart(struct work_struct *data)
@@ -907,7 +1015,7 @@ static void iwl_bg_restart(struct work_struct *data)
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 		return;
 
-	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->shrd->status)) {
+	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
 		mutex_lock(&priv->mutex);
 		iwlagn_prepare_restart(priv);
 		mutex_unlock(&priv->mutex);
@ -1028,6 +1136,189 @@ static void iwl_init_hw_rates(struct ieee80211_rate *rates)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
|
||||||
|
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
|
||||||
|
static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
|
||||||
|
struct ieee80211_sta_ht_cap *ht_info,
|
||||||
|
enum ieee80211_band band)
|
||||||
|
{
|
||||||
|
u16 max_bit_rate = 0;
|
||||||
|
u8 rx_chains_num = hw_params(priv).rx_chains_num;
|
||||||
|
u8 tx_chains_num = hw_params(priv).tx_chains_num;
|
||||||
|
|
||||||
|
ht_info->cap = 0;
|
||||||
|
memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
|
||||||
|
|
||||||
|
ht_info->ht_supported = true;
|
||||||
|
|
||||||
|
if (cfg(priv)->ht_params &&
|
||||||
|
cfg(priv)->ht_params->ht_greenfield_support)
|
||||||
|
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
|
||||||
|
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
|
||||||
|
max_bit_rate = MAX_BIT_RATE_20_MHZ;
|
||||||
|
if (hw_params(priv).ht40_channel & BIT(band)) {
|
||||||
|
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
|
||||||
|
ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
|
||||||
|
ht_info->mcs.rx_mask[4] = 0x01;
|
||||||
|
max_bit_rate = MAX_BIT_RATE_40_MHZ;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (iwlagn_mod_params.amsdu_size_8K)
|
||||||
|
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
|
||||||
|
|
||||||
|
ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
|
||||||
|
ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
|
||||||
|
|
||||||
|
ht_info->mcs.rx_mask[0] = 0xFF;
|
||||||
|
if (rx_chains_num >= 2)
|
||||||
|
ht_info->mcs.rx_mask[1] = 0xFF;
|
||||||
|
if (rx_chains_num >= 3)
|
||||||
|
ht_info->mcs.rx_mask[2] = 0xFF;
|
||||||
|
|
||||||
|
/* Highest supported Rx data rate */
|
||||||
|
max_bit_rate *= rx_chains_num;
|
||||||
|
WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
|
||||||
|
ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
|
||||||
|
|
||||||
|
/* Tx MCS capabilities */
|
||||||
|
ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
|
||||||
|
if (tx_chains_num != rx_chains_num) {
|
||||||
|
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
|
||||||
|
ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
|
||||||
|
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
|
||||||
|
*/
|
||||||
|
static int iwl_init_geos(struct iwl_priv *priv)
|
||||||
|
{
|
||||||
|
struct iwl_channel_info *ch;
|
||||||
|
struct ieee80211_supported_band *sband;
|
||||||
|
struct ieee80211_channel *channels;
|
||||||
|
struct ieee80211_channel *geo_ch;
|
||||||
|
struct ieee80211_rate *rates;
|
||||||
|
int i = 0;
|
||||||
|
s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;
|
||||||
|
|
||||||
|
if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
|
||||||
|
priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
|
||||||
|
IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
|
||||||
|
set_bit(STATUS_GEO_CONFIGURED, &priv->status);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
channels = kcalloc(priv->channel_count,
|
||||||
|
sizeof(struct ieee80211_channel), GFP_KERNEL);
|
||||||
|
if (!channels)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
|
||||||
|
GFP_KERNEL);
|
||||||
|
if (!rates) {
|
||||||
|
kfree(channels);
|
||||||
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* 5.2GHz channels start after the 2.4GHz channels */
|
||||||
|
sband = &priv->bands[IEEE80211_BAND_5GHZ];
|
||||||
|
sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
|
||||||
|
/* just OFDM */
|
||||||
|
sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
|
||||||
|
sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
|
||||||
|
|
||||||
|
if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
|
||||||
|
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
|
||||||
|
IEEE80211_BAND_5GHZ);
|
||||||
|
|
||||||
|
sband = &priv->bands[IEEE80211_BAND_2GHZ];
|
||||||
|
sband->channels = channels;
|
||||||
|
/* OFDM & CCK */
|
||||||
|
sband->bitrates = rates;
|
||||||
|
sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
|
||||||
|
|
||||||
|
if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
|
||||||
|
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
|
||||||
|
IEEE80211_BAND_2GHZ);
|
||||||
|
|
||||||
|
priv->ieee_channels = channels;
|
||||||
|
priv->ieee_rates = rates;
|
||||||
|
|
||||||
|
for (i = 0; i < priv->channel_count; i++) {
|
||||||
|
ch = &priv->channel_info[i];
|
||||||
|
|
||||||
|
/* FIXME: might be removed if scan is OK */
|
||||||
|
if (!is_channel_valid(ch))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
sband = &priv->bands[ch->band];
|
||||||
|
|
||||||
|
geo_ch = &sband->channels[sband->n_channels++];
|
||||||
|
|
||||||
|
geo_ch->center_freq =
|
||||||
|
ieee80211_channel_to_frequency(ch->channel, ch->band);
|
||||||
|
geo_ch->max_power = ch->max_power_avg;
|
||||||
|
geo_ch->max_antenna_gain = 0xff;
|
||||||
|
geo_ch->hw_value = ch->channel;
|
||||||
|
|
||||||
|
if (is_channel_valid(ch)) {
|
||||||
|
if (!(ch->flags & EEPROM_CHANNEL_IBSS))
|
||||||
|
geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
|
||||||
|
|
||||||
|
if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
|
||||||
|
geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
|
||||||
|
|
||||||
|
if (ch->flags & EEPROM_CHANNEL_RADAR)
|
||||||
|
geo_ch->flags |= IEEE80211_CHAN_RADAR;
|
||||||
|
|
||||||
|
geo_ch->flags |= ch->ht40_extension_channel;
|
||||||
|
|
||||||
|
if (ch->max_power_avg > max_tx_power)
|
||||||
|
max_tx_power = ch->max_power_avg;
|
||||||
|
} else {
|
||||||
|
geo_ch->flags |= IEEE80211_CHAN_DISABLED;
|
||||||
|
}
|
||||||
|
|
||||||
|
IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
|
||||||
|
ch->channel, geo_ch->center_freq,
|
||||||
|
is_channel_a_band(ch) ? "5.2" : "2.4",
|
||||||
|
geo_ch->flags & IEEE80211_CHAN_DISABLED ?
|
||||||
|
"restricted" : "valid",
|
||||||
|
geo_ch->flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
priv->tx_power_device_lmt = max_tx_power;
|
||||||
|
priv->tx_power_user_lmt = max_tx_power;
|
||||||
|
priv->tx_power_next = max_tx_power;
|
||||||
|
|
||||||
|
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
|
||||||
|
hw_params(priv).sku & EEPROM_SKU_CAP_BAND_52GHZ) {
|
||||||
|
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
|
||||||
|
"Please send your %s to maintainer.\n",
|
||||||
|
trans(priv)->hw_id_str);
|
||||||
|
hw_params(priv).sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
|
||||||
|
}
|
||||||
|
|
||||||
|
IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
|
||||||
|
priv->bands[IEEE80211_BAND_2GHZ].n_channels,
|
||||||
|
priv->bands[IEEE80211_BAND_5GHZ].n_channels);
|
||||||
|
|
||||||
|
set_bit(STATUS_GEO_CONFIGURED, &priv->status);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* iwl_free_geos - undo allocations in iwl_init_geos
|
||||||
|
*/
|
||||||
|
static void iwl_free_geos(struct iwl_priv *priv)
|
||||||
|
{
|
||||||
|
kfree(priv->ieee_channels);
|
||||||
|
kfree(priv->ieee_rates);
|
||||||
|
clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
|
||||||
|
}
|
||||||
|
|
||||||
 static int iwl_init_drv(struct iwl_priv *priv)
 {
 	int ret;
@@ -1130,8 +1421,6 @@ static void iwl_set_hw_params(struct iwl_priv *priv)
 	if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
 		hw_params(priv).sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
 
-	hw_params(priv).num_ampdu_queues =
-		cfg(priv)->base_params->num_of_ampdu_queues;
 	hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout;
 
 	/* Device-specific setup */
@@ -1178,7 +1467,6 @@ static void iwl_debug_config(struct iwl_priv *priv)
 static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 						 const struct iwl_fw *fw)
 {
-	int err = 0;
 	struct iwl_priv *priv;
 	struct ieee80211_hw *hw;
 	struct iwl_op_mode *op_mode;
@@ -1193,6 +1481,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 		STATISTICS_NOTIFICATION,
 		REPLY_TX,
 	};
+	const u8 *q_to_ac;
+	int n_q_to_ac;
+	int i;
 
 	/************************
 	 * 1. Allocating HW data
@@ -1201,7 +1492,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	if (!hw) {
 		pr_err("%s: Cannot allocate network device\n",
 		       cfg(trans)->name);
-		err = -ENOMEM;
 		goto out;
 	}
 
@@ -1210,8 +1500,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	priv = IWL_OP_MODE_GET_DVM(op_mode);
 	priv->shrd = trans->shrd;
 	priv->fw = fw;
-	/* TODO: remove fw from shared data later */
-	priv->shrd->fw = fw;
 
 	/*
 	 * Populate the state variables that the transport layer needs
@@ -1230,9 +1518,19 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
 		priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
 		trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
+		trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
+		trans_cfg.n_queue_to_fifo =
+			ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
+		q_to_ac = iwlagn_pan_queue_to_ac;
+		n_q_to_ac = ARRAY_SIZE(iwlagn_pan_queue_to_ac);
 	} else {
 		priv->sta_key_max_num = STA_KEY_MAX_NUM;
 		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+		trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
+		trans_cfg.n_queue_to_fifo =
+			ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
+		q_to_ac = iwlagn_bss_queue_to_ac;
+		n_q_to_ac = ARRAY_SIZE(iwlagn_bss_queue_to_ac);
 	}
 
 	/* Configure transport layer */
@@ -1273,26 +1571,24 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	IWL_INFO(priv, "Detected %s, REV=0x%X\n",
 		cfg(priv)->name, trans(priv)->hw_rev);
 
-	err = iwl_trans_start_hw(trans(priv));
-	if (err)
+	if (iwl_trans_start_hw(trans(priv)))
 		goto out_free_traffic_mem;
 
 	/*****************
 	 * 3. Read EEPROM
 	 *****************/
-	err = iwl_eeprom_init(trans(priv), trans(priv)->hw_rev);
-	/* Reset chip to save power until we load uCode during "up". */
-	iwl_trans_stop_hw(trans(priv));
-	if (err) {
+	/* Read the EEPROM */
+	if (iwl_eeprom_init(trans(priv), trans(priv)->hw_rev)) {
 		IWL_ERR(priv, "Unable to init EEPROM\n");
 		goto out_free_traffic_mem;
 	}
-	err = iwl_eeprom_check_version(priv);
-	if (err)
+
+	/* Reset chip to save power until we load uCode during "up". */
+	iwl_trans_stop_hw(trans(priv));
+
+	if (iwl_eeprom_check_version(priv))
 		goto out_free_eeprom;
 
-	err = iwl_eeprom_init_hw_params(priv);
-	if (err)
+	if (iwl_eeprom_init_hw_params(priv))
 		goto out_free_eeprom;
 
 	/* extract MAC Address */
@@ -1323,6 +1619,11 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 		ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
 		priv->sta_key_max_num = STA_KEY_MAX_NUM;
 		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+		trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
+		trans_cfg.n_queue_to_fifo =
+			ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
+		q_to_ac = iwlagn_bss_queue_to_ac;
+		n_q_to_ac = ARRAY_SIZE(iwlagn_bss_queue_to_ac);
 
 		/* Configure transport layer again*/
 		iwl_trans_configure(trans(priv), &trans_cfg);
@@ -1331,10 +1632,22 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	/*******************
 	 * 5. Setup priv
 	 *******************/
-	err = iwl_init_drv(priv);
-	if (err)
+	for (i = 0; i < IEEE80211_NUM_ACS; i++)
+		atomic_set(&priv->ac_stop_count[i], 0);
+
+	for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
+		if (i < n_q_to_ac)
+			priv->queue_to_ac[i] = q_to_ac[i];
+		else
+			priv->queue_to_ac[i] = IWL_INVALID_AC;
+	}
+
+	WARN_ON(trans_cfg.queue_to_fifo[trans_cfg.cmd_queue] !=
+		IWLAGN_CMD_FIFO_NUM);
+
+	if (iwl_init_drv(priv))
 		goto out_free_eeprom;
 
 	/* At this point both hw and priv are initialized. */
 
 	/********************
@@ -1367,15 +1680,12 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	 *
 	 * 7. Setup and register with mac80211 and debugfs
 	 **************************************************/
-	err = iwlagn_mac_setup_register(priv, &fw->ucode_capa);
-	if (err)
+	if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
 		goto out_destroy_workqueue;
 
-	err = iwl_dbgfs_register(priv, DRV_NAME);
-	if (err)
+	if (iwl_dbgfs_register(priv, DRV_NAME))
 		IWL_ERR(priv,
-			"failed to create debugfs files. Ignoring error: %d\n",
-			err);
+			"failed to create debugfs files. Ignoring error\n");
 
 	return op_mode;
 
@ -1429,13 +1739,399 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
|
|||||||
ieee80211_free_hw(priv->hw);
|
ieee80211_free_hw(priv->hw);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static const char * const desc_lookup_text[] = {
|
||||||
|
"OK",
|
||||||
|
"FAIL",
|
||||||
|
"BAD_PARAM",
|
||||||
|
"BAD_CHECKSUM",
|
||||||
|
"NMI_INTERRUPT_WDG",
|
||||||
|
"SYSASSERT",
|
||||||
|
"FATAL_ERROR",
|
||||||
|
"BAD_COMMAND",
|
||||||
|
"HW_ERROR_TUNE_LOCK",
|
||||||
|
"HW_ERROR_TEMPERATURE",
|
||||||
|
"ILLEGAL_CHAN_FREQ",
|
||||||
|
"VCC_NOT_STABLE",
|
||||||
|
"FH_ERROR",
|
||||||
|
"NMI_INTERRUPT_HOST",
|
||||||
|
"NMI_INTERRUPT_ACTION_PT",
|
||||||
|
"NMI_INTERRUPT_UNKNOWN",
|
||||||
|
"UCODE_VERSION_MISMATCH",
|
||||||
|
"HW_ERROR_ABS_LOCK",
|
||||||
|
"HW_ERROR_CAL_LOCK_FAIL",
|
||||||
|
"NMI_INTERRUPT_INST_ACTION_PT",
|
||||||
|
"NMI_INTERRUPT_DATA_ACTION_PT",
|
||||||
|
"NMI_TRM_HW_ER",
|
||||||
|
"NMI_INTERRUPT_TRM",
|
||||||
|
"NMI_INTERRUPT_BREAK_POINT",
|
||||||
|
"DEBUG_0",
|
||||||
|
"DEBUG_1",
|
||||||
|
"DEBUG_2",
|
||||||
|
"DEBUG_3",
|
||||||
|
};
|
||||||
|
|
||||||
|
static struct { char *name; u8 num; } advanced_lookup[] = {
|
||||||
|
{ "NMI_INTERRUPT_WDG", 0x34 },
|
||||||
|
{ "SYSASSERT", 0x35 },
|
||||||
|
{ "UCODE_VERSION_MISMATCH", 0x37 },
|
||||||
|
{ "BAD_COMMAND", 0x38 },
|
||||||
|
{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
|
||||||
|
{ "FATAL_ERROR", 0x3D },
|
||||||
|
{ "NMI_TRM_HW_ERR", 0x46 },
|
||||||
|
{ "NMI_INTERRUPT_TRM", 0x4C },
|
||||||
|
{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
|
||||||
|
{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
|
||||||
|
{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
|
||||||
|
{ "NMI_INTERRUPT_HOST", 0x66 },
|
||||||
|
{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
|
||||||
|
{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
|
||||||
|
{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
|
||||||
|
{ "ADVANCED_SYSASSERT", 0 },
|
||||||
|
};
|
||||||
|
|
||||||
|
static const char *desc_lookup(u32 num)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
int max = ARRAY_SIZE(desc_lookup_text);
|
||||||
|
|
||||||
|
if (num < max)
|
||||||
|
return desc_lookup_text[num];
|
||||||
|
|
||||||
|
max = ARRAY_SIZE(advanced_lookup) - 1;
|
||||||
|
for (i = 0; i < max; i++) {
|
||||||
|
if (advanced_lookup[i].num == num)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
return advanced_lookup[i].name;
|
||||||
|
}
|
||||||
|
|
||||||
|
#define ERROR_START_OFFSET (1 * sizeof(u32))
|
||||||
|
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
|
||||||
|
|
||||||
|
static void iwl_dump_nic_error_log(struct iwl_priv *priv)
|
||||||
|
{
|
||||||
|
struct iwl_trans *trans = trans(priv);
|
||||||
|
u32 base;
|
||||||
|
struct iwl_error_event_table table;
|
||||||
|
|
||||||
|
base = priv->device_pointers.error_event_table;
|
||||||
|
if (priv->cur_ucode == IWL_UCODE_INIT) {
|
||||||
|
if (!base)
|
||||||
|
base = priv->fw->init_errlog_ptr;
|
||||||
|
} else {
|
||||||
|
if (!base)
|
||||||
|
base = priv->fw->inst_errlog_ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!iwlagn_hw_valid_rtc_data_addr(base)) {
|
||||||
|
IWL_ERR(priv,
|
||||||
|
"Not valid error log pointer 0x%08X for %s uCode\n",
|
||||||
|
base,
|
||||||
|
(priv->cur_ucode == IWL_UCODE_INIT)
|
||||||
|
? "Init" : "RT");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*TODO: Update dbgfs with ISR error stats obtained below */
|
||||||
|
iwl_read_targ_mem_words(trans, base, &table, sizeof(table));
|
||||||
|
|
||||||
|
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
|
||||||
|
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
|
||||||
|
IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
|
||||||
|
priv->shrd->status, table.valid);
|
||||||
|
}
|
||||||
|
|
||||||
|
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
|
||||||
|
table.data1, table.data2, table.line,
|
||||||
|
table.blink1, table.blink2, table.ilink1,
|
||||||
|
table.ilink2, table.bcon_time, table.gp1,
|
||||||
|
table.gp2, table.gp3, table.ucode_ver,
|
||||||
|
table.hw_ver, table.brd_ver);
|
||||||
|
IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
|
||||||
|
desc_lookup(table.error_id));
|
||||||
|
IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
|
||||||
|
IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
|
||||||
|
IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
|
||||||
|
IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
|
||||||
|
IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
|
||||||
|
IWL_ERR(priv, "0x%08X | data1\n", table.data1);
|
||||||
|
IWL_ERR(priv, "0x%08X | data2\n", table.data2);
|
||||||
|
IWL_ERR(priv, "0x%08X | line\n", table.line);
|
||||||
|
IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
|
||||||
|
IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
|
||||||
|
IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
|
||||||
|
IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
|
||||||
|
IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
|
||||||
|
IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
|
||||||
|
IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
|
||||||
|
IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
|
||||||
|
IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
|
||||||
|
IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
|
||||||
|
IWL_ERR(priv, "0x%08X | isr0\n", table.isr0);
|
||||||
|
IWL_ERR(priv, "0x%08X | isr1\n", table.isr1);
|
||||||
|
IWL_ERR(priv, "0x%08X | isr2\n", table.isr2);
|
||||||
|
IWL_ERR(priv, "0x%08X | isr3\n", table.isr3);
|
||||||
|
IWL_ERR(priv, "0x%08X | isr4\n", table.isr4);
|
||||||
|
IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref);
|
||||||
|
IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event);
|
||||||
|
IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control);
|
||||||
|
IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration);
|
||||||
|
IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
|
||||||
|
IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
|
||||||
|
IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
|
||||||
|
IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp);
|
||||||
|
IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler);
|
||||||
|
}
|
||||||
|
|
||||||
|
#define EVENT_START_OFFSET (4 * sizeof(u32))
|
||||||
|
|
||||||
|
/**
|
||||||
|
* iwl_print_event_log - Dump error event log to syslog
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
|
||||||
|
u32 num_events, u32 mode,
|
||||||
|
int pos, char **buf, size_t bufsz)
|
||||||
|
{
|
||||||
|
u32 i;
|
||||||
|
u32 base; /* SRAM byte address of event log header */
|
||||||
|
u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
|
||||||
|
u32 ptr; /* SRAM byte address of log data */
|
||||||
|
u32 ev, time, data; /* event log data */
|
||||||
|
unsigned long reg_flags;
|
||||||
|
|
||||||
|
struct iwl_trans *trans = trans(priv);
|
||||||
|
|
||||||
|
if (num_events == 0)
|
||||||
|
return pos;
|
||||||
|
|
||||||
|
base = priv->device_pointers.log_event_table;
|
||||||
|
if (priv->cur_ucode == IWL_UCODE_INIT) {
|
||||||
|
if (!base)
|
||||||
|
base = priv->fw->init_evtlog_ptr;
|
||||||
|
} else {
|
||||||
|
if (!base)
|
||||||
|
base = priv->fw->inst_evtlog_ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (mode == 0)
|
||||||
|
event_size = 2 * sizeof(u32);
|
||||||
|
else
|
||||||
|
event_size = 3 * sizeof(u32);
|
||||||
|
|
||||||
|
ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
|
||||||
|
|
||||||
|
/* Make sure device is powered up for SRAM reads */
|
||||||
|
spin_lock_irqsave(&trans->reg_lock, reg_flags);
|
||||||
|
if (unlikely(!iwl_grab_nic_access(trans)))
|
||||||
|
goto out_unlock;
|
||||||
|
|
||||||
|
/* Set starting address; reads will auto-increment */
|
||||||
|
iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
|
||||||
|
|
||||||
|
/* "time" is actually "data" for mode 0 (no timestamp).
|
||||||
|
* place event id # at far right for easier visual parsing. */
|
||||||
|
for (i = 0; i < num_events; i++) {
|
||||||
|
ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
|
||||||
|
time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
|
||||||
|
if (mode == 0) {
|
||||||
|
/* data, ev */
|
||||||
|
if (bufsz) {
|
||||||
|
pos += scnprintf(*buf + pos, bufsz - pos,
|
||||||
|
"EVT_LOG:0x%08x:%04u\n",
|
||||||
|
time, ev);
|
||||||
|
} else {
|
||||||
|
trace_iwlwifi_dev_ucode_event(trans->dev, 0,
|
||||||
|
time, ev);
|
||||||
|
IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
|
||||||
|
time, ev);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
|
||||||
|
if (bufsz) {
|
||||||
|
pos += scnprintf(*buf + pos, bufsz - pos,
|
||||||
|
"EVT_LOGT:%010u:0x%08x:%04u\n",
|
||||||
|
time, data, ev);
|
||||||
|
} else {
|
||||||
|
IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
|
||||||
|
time, data, ev);
|
||||||
|
trace_iwlwifi_dev_ucode_event(trans->dev, time,
|
||||||
|
data, ev);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Allow device to power down */
|
||||||
|
iwl_release_nic_access(trans);
|
||||||
|
out_unlock:
|
||||||
|
spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
|
||||||
|
return pos;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* iwl_print_last_event_logs - Dump the newest # of event log to syslog
|
||||||
|
*/
|
||||||
|
static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
|
||||||
|
u32 num_wraps, u32 next_entry,
|
||||||
|
u32 size, u32 mode,
|
||||||
|
int pos, char **buf, size_t bufsz)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* display the newest DEFAULT_LOG_ENTRIES entries
|
||||||
|
* i.e the entries just before the next ont that uCode would fill.
|
||||||
|
*/
|
||||||
|
if (num_wraps) {
|
||||||
|
if (next_entry < size) {
|
||||||
|
pos = iwl_print_event_log(priv,
|
||||||
|
capacity - (size - next_entry),
|
||||||
|
size - next_entry, mode,
|
||||||
|
pos, buf, bufsz);
|
||||||
|
pos = iwl_print_event_log(priv, 0,
|
||||||
|
next_entry, mode,
|
||||||
|
pos, buf, bufsz);
|
||||||
|
} else
|
||||||
|
pos = iwl_print_event_log(priv, next_entry - size,
|
||||||
|
size, mode, pos, buf, bufsz);
|
||||||
|
} else {
|
||||||
|
if (next_entry < size) {
|
||||||
|
pos = iwl_print_event_log(priv, 0, next_entry,
|
||||||
|
mode, pos, buf, bufsz);
|
||||||
|
} else {
|
||||||
|
pos = iwl_print_event_log(priv, next_entry - size,
|
||||||
|
size, mode, pos, buf, bufsz);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return pos;
|
||||||
|
}
|
||||||
|
|
||||||
|
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
|
||||||
|
|
||||||
|
int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
|
||||||
|
char **buf, bool display)
|
||||||
|
{
|
||||||
|
u32 base; /* SRAM byte address of event log header */
|
||||||
|
u32 capacity; /* event log capacity in # entries */
|
||||||
|
u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
|
||||||
|
u32 num_wraps; /* # times uCode wrapped to top of log */
|
||||||
|
u32 next_entry; /* index of next entry to be written by uCode */
|
||||||
|
u32 size; /* # entries that we'll print */
|
||||||
|
u32 logsize;
|
||||||
|
int pos = 0;
|
||||||
|
size_t bufsz = 0;
|
||||||
|
struct iwl_trans *trans = trans(priv);
|
||||||
|
|
||||||
|
base = priv->device_pointers.log_event_table;
|
||||||
|
if (priv->cur_ucode == IWL_UCODE_INIT) {
|
||||||
|
logsize = priv->fw->init_evtlog_size;
|
||||||
|
if (!base)
|
||||||
|
base = priv->fw->init_evtlog_ptr;
|
||||||
|
} else {
|
||||||
|
logsize = priv->fw->inst_evtlog_size;
|
||||||
|
if (!base)
|
||||||
|
base = priv->fw->inst_evtlog_ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!iwlagn_hw_valid_rtc_data_addr(base)) {
|
||||||
|
IWL_ERR(priv,
|
||||||
|
"Invalid event log pointer 0x%08X for %s uCode\n",
|
||||||
|
base,
|
||||||
|
(priv->cur_ucode == IWL_UCODE_INIT)
|
||||||
|
? "Init" : "RT");
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* event log header */
|
||||||
|
capacity = iwl_read_targ_mem(trans, base);
|
||||||
|
mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
|
||||||
|
num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
|
||||||
|
next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
|
||||||
|
|
||||||
|
if (capacity > logsize) {
|
||||||
|
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d "
|
||||||
|
"entries\n", capacity, logsize);
|
||||||
|
capacity = logsize;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (next_entry > logsize) {
|
||||||
|
IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
|
||||||
|
next_entry, logsize);
|
||||||
|
next_entry = logsize;
|
||||||
|
}
|
||||||
|
|
||||||
|
size = num_wraps ? capacity : next_entry;
|
||||||
|
|
||||||
|
/* bail out if nothing in log */
|
||||||
|
if (size == 0) {
|
||||||
|
IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
|
||||||
|
return pos;
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||||
|
if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
|
||||||
|
size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
|
||||||
|
? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
|
||||||
|
#else
|
||||||
|
size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
|
||||||
|
? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
|
||||||
|
#endif
|
||||||
|
IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
|
||||||
|
size);
|
||||||
|
|
||||||
|
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||||
|
if (display) {
|
||||||
|
if (full_log)
|
||||||
|
bufsz = capacity * 48;
|
||||||
|
else
|
||||||
|
bufsz = size * 48;
|
||||||
|
*buf = kmalloc(bufsz, GFP_KERNEL);
|
||||||
|
if (!*buf)
|
||||||
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
|
||||||
|
/*
|
||||||
|
* if uCode has wrapped back to top of log,
|
||||||
|
* start at the oldest entry,
|
||||||
|
* i.e the next one that uCode would fill.
|
||||||
|
*/
|
||||||
|
if (num_wraps)
|
||||||
|
pos = iwl_print_event_log(priv, next_entry,
|
||||||
|
capacity - next_entry, mode,
|
||||||
|
pos, buf, bufsz);
|
||||||
|
/* (then/else) start at top of log */
|
||||||
|
pos = iwl_print_event_log(priv, 0,
|
||||||
|
next_entry, mode, pos, buf, bufsz);
|
||||||
|
} else
|
||||||
|
pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
|
||||||
|
next_entry, size, mode,
|
||||||
|
pos, buf, bufsz);
|
||||||
|
#else
|
||||||
|
pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
|
||||||
|
next_entry, size, mode,
|
||||||
|
pos, buf, bufsz);
|
||||||
|
#endif
|
||||||
|
return pos;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void iwl_nic_error(struct iwl_op_mode *op_mode)
|
||||||
|
{
|
||||||
|
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
|
||||||
|
|
||||||
|
IWL_ERR(priv, "Loaded firmware version: %s\n",
|
||||||
|
priv->fw->fw_version);
|
||||||
|
|
||||||
|
iwl_dump_nic_error_log(priv);
|
||||||
|
iwl_dump_nic_event_log(priv, false, NULL, false);
|
||||||
|
|
||||||
|
iwlagn_fw_error(priv, false);
|
||||||
|
}
|
||||||
|
|
||||||
 static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
 {
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
 
 	if (!iwl_check_for_ct_kill(priv)) {
 		IWL_ERR(priv, "Restarting adapter queue is full\n");
-		iwl_nic_error(op_mode);
+		iwlagn_fw_error(priv, false);
 	}
 }
 
@@ -1446,17 +2142,39 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
 	cfg(priv)->lib->nic_config(priv);
 }
 
-static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, u8 ac)
+static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+	int ac = priv->queue_to_ac[queue];
+
+	if (WARN_ON_ONCE(ac == IWL_INVALID_AC))
+		return;
+
+	if (atomic_inc_return(&priv->ac_stop_count[ac]) > 1) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"queue %d (AC %d) already stopped\n",
+			queue, ac);
+		return;
+	}
+
 	set_bit(ac, &priv->transport_queue_stop);
 	ieee80211_stop_queue(priv->hw, ac);
 }
 
-static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, u8 ac)
+static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+	int ac = priv->queue_to_ac[queue];
+
+	if (WARN_ON_ONCE(ac == IWL_INVALID_AC))
+		return;
+
+	if (atomic_dec_return(&priv->ac_stop_count[ac]) > 0) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"queue %d (AC %d) already awake\n",
+			queue, ac);
+		return;
+	}
+
 	clear_bit(ac, &priv->transport_queue_stop);
 
@@ -65,6 +65,13 @@
 
 #include "iwl-dev.h"
 
+/* The first 11 queues (0-10) are used otherwise */
+#define IWLAGN_FIRST_AMPDU_QUEUE	11
+
+/* AUX (TX during scan dwell) queue */
+#define IWL_AUX_QUEUE		10
+
+
 struct iwl_ucode_capabilities;
 
 extern struct ieee80211_ops iwlagn_hw_ops;
@@ -85,7 +92,6 @@ int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
 				 struct iwl_rx_cmd_buffer *rxb,
 				 struct iwl_device_cmd *cmd);
 void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
-void iwl_nic_error(struct iwl_op_mode *op_mode);
 
 bool iwl_check_for_ct_kill(struct iwl_priv *priv);
 
@@ -115,9 +121,6 @@ void iwlagn_config_ht40(struct ieee80211_conf *conf,
 			struct iwl_rxon_context *ctx);
 
 /* uCode */
-int iwlagn_rx_calib_result(struct iwl_priv *priv,
-			   struct iwl_rx_cmd_buffer *rxb,
-			   struct iwl_device_cmd *cmd);
 int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
 void iwl_send_prio_tbl(struct iwl_priv *priv);
 int iwl_init_alive_start(struct iwl_priv *priv);
@@ -128,6 +131,9 @@ int iwl_send_calib_results(struct iwl_priv *priv);
 int iwl_calib_set(struct iwl_priv *priv,
 		  const struct iwl_calib_hdr *cmd, int len);
 void iwl_calib_free_results(struct iwl_priv *priv);
+void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand);
+int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+			   char **buf, bool display);
 
 /* lib */
 int iwlagn_send_tx_power(struct iwl_priv *priv);
@@ -386,6 +392,15 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
 	return iwl_is_ready(priv);
 }
 
+static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
+{
+	if (state)
+		set_bit(STATUS_POWER_PMI, &priv->status);
+	else
+		clear_bit(STATUS_POWER_PMI, &priv->status);
+	iwl_trans_set_pmi(trans(priv), state);
+}
+
 #ifdef CONFIG_IWLWIFI_DEBUG
 #define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
 do { \
@@ -28,7 +28,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/etherdevice.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
@ -44,189 +43,6 @@
|
|||||||
|
|
||||||
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
|
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
|
||||||
|
|
||||||
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
|
|
||||||
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
|
|
||||||
static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
|
|
||||||
struct ieee80211_sta_ht_cap *ht_info,
|
|
||||||
enum ieee80211_band band)
|
|
||||||
{
|
|
||||||
u16 max_bit_rate = 0;
|
|
||||||
u8 rx_chains_num = hw_params(priv).rx_chains_num;
|
|
||||||
u8 tx_chains_num = hw_params(priv).tx_chains_num;
|
|
||||||
|
|
||||||
ht_info->cap = 0;
|
|
||||||
memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
|
|
||||||
|
|
||||||
ht_info->ht_supported = true;
|
|
||||||
|
|
||||||
if (cfg(priv)->ht_params &&
|
|
||||||
cfg(priv)->ht_params->ht_greenfield_support)
|
|
||||||
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
|
|
||||||
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
|
|
||||||
max_bit_rate = MAX_BIT_RATE_20_MHZ;
|
|
||||||
if (hw_params(priv).ht40_channel & BIT(band)) {
|
|
||||||
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
|
|
||||||
ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
|
|
||||||
ht_info->mcs.rx_mask[4] = 0x01;
|
|
||||||
max_bit_rate = MAX_BIT_RATE_40_MHZ;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (iwlagn_mod_params.amsdu_size_8K)
|
|
||||||
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
|
|
||||||
|
|
||||||
ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
|
|
||||||
ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
|
|
||||||
|
|
||||||
ht_info->mcs.rx_mask[0] = 0xFF;
|
|
||||||
if (rx_chains_num >= 2)
|
|
||||||
ht_info->mcs.rx_mask[1] = 0xFF;
|
|
||||||
if (rx_chains_num >= 3)
|
|
||||||
ht_info->mcs.rx_mask[2] = 0xFF;
|
|
||||||
|
|
||||||
/* Highest supported Rx data rate */
|
|
||||||
max_bit_rate *= rx_chains_num;
|
|
||||||
WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
|
|
||||||
ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
|
|
||||||
|
|
||||||
/* Tx MCS capabilities */
|
|
||||||
ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
|
|
||||||
if (tx_chains_num != rx_chains_num) {
|
|
||||||
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
|
|
||||||
ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
|
|
||||||
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
|
|
||||||
*/
|
|
||||||
int iwl_init_geos(struct iwl_priv *priv)
|
|
||||||
{
|
|
||||||
struct iwl_channel_info *ch;
|
|
||||||
struct ieee80211_supported_band *sband;
|
|
||||||
struct ieee80211_channel *channels;
|
|
||||||
struct ieee80211_channel *geo_ch;
|
|
||||||
struct ieee80211_rate *rates;
|
|
||||||
int i = 0;
|
|
||||||
s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;
|
|
||||||
|
|
||||||
if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
|
|
||||||
priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
|
|
||||||
IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
|
|
||||||
set_bit(STATUS_GEO_CONFIGURED, &priv->status);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
channels = kcalloc(priv->channel_count,
|
|
||||||
sizeof(struct ieee80211_channel), GFP_KERNEL);
|
|
||||||
if (!channels)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
|
|
||||||
GFP_KERNEL);
|
|
||||||
if (!rates) {
|
|
||||||
kfree(channels);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* 5.2GHz channels start after the 2.4GHz channels */
|
|
||||||
sband = &priv->bands[IEEE80211_BAND_5GHZ];
|
|
||||||
sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
|
|
||||||
/* just OFDM */
|
|
||||||
sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
|
|
||||||
sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
|
|
||||||
|
|
||||||
if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
|
|
||||||
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
|
|
||||||
IEEE80211_BAND_5GHZ);
|
|
||||||
|
|
||||||
sband = &priv->bands[IEEE80211_BAND_2GHZ];
|
|
||||||
sband->channels = channels;
|
|
||||||
/* OFDM & CCK */
|
|
||||||
sband->bitrates = rates;
|
|
||||||
sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
|
|
||||||
|
|
||||||
if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
|
|
||||||
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
|
|
||||||
IEEE80211_BAND_2GHZ);
|
|
||||||
|
|
||||||
priv->ieee_channels = channels;
|
|
||||||
priv->ieee_rates = rates;
|
|
||||||
|
|
||||||
for (i = 0; i < priv->channel_count; i++) {
|
|
||||||
ch = &priv->channel_info[i];
|
|
||||||
|
|
||||||
/* FIXME: might be removed if scan is OK */
|
|
||||||
if (!is_channel_valid(ch))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
sband = &priv->bands[ch->band];
|
|
||||||
|
|
||||||
geo_ch = &sband->channels[sband->n_channels++];
|
|
||||||
|
|
||||||
geo_ch->center_freq =
|
|
||||||
ieee80211_channel_to_frequency(ch->channel, ch->band);
|
|
||||||
geo_ch->max_power = ch->max_power_avg;
|
|
||||||
geo_ch->max_antenna_gain = 0xff;
|
|
||||||
geo_ch->hw_value = ch->channel;
|
|
||||||
|
|
||||||
if (is_channel_valid(ch)) {
|
|
||||||
if (!(ch->flags & EEPROM_CHANNEL_IBSS))
|
|
||||||
geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
|
|
||||||
|
|
||||||
if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
|
|
||||||
geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
|
|
||||||
|
|
||||||
if (ch->flags & EEPROM_CHANNEL_RADAR)
|
|
||||||
geo_ch->flags |= IEEE80211_CHAN_RADAR;
|
|
||||||
|
|
||||||
geo_ch->flags |= ch->ht40_extension_channel;
|
|
||||||
|
|
||||||
if (ch->max_power_avg > max_tx_power)
|
|
||||||
max_tx_power = ch->max_power_avg;
|
|
||||||
} else {
|
|
||||||
geo_ch->flags |= IEEE80211_CHAN_DISABLED;
|
|
||||||
}
|
|
||||||
|
|
||||||
IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
|
|
||||||
ch->channel, geo_ch->center_freq,
|
|
||||||
is_channel_a_band(ch) ? "5.2" : "2.4",
|
|
||||||
geo_ch->flags & IEEE80211_CHAN_DISABLED ?
|
|
||||||
"restricted" : "valid",
|
|
||||||
geo_ch->flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
priv->tx_power_device_lmt = max_tx_power;
|
|
||||||
priv->tx_power_user_lmt = max_tx_power;
|
|
||||||
priv->tx_power_next = max_tx_power;
|
|
||||||
|
|
||||||
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
|
|
||||||
hw_params(priv).sku & EEPROM_SKU_CAP_BAND_52GHZ) {
|
|
||||||
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
|
|
||||||
"Please send your %s to maintainer.\n",
|
|
||||||
trans(priv)->hw_id_str);
|
|
||||||
hw_params(priv).sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
|
|
||||||
}
|
|
||||||
|
|
||||||
IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
|
|
||||||
priv->bands[IEEE80211_BAND_2GHZ].n_channels,
|
|
||||||
priv->bands[IEEE80211_BAND_5GHZ].n_channels);
|
|
||||||
|
|
||||||
set_bit(STATUS_GEO_CONFIGURED, &priv->status);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* iwl_free_geos - undo allocations in iwl_init_geos
|
|
||||||
*/
|
|
||||||
void iwl_free_geos(struct iwl_priv *priv)
|
|
||||||
{
|
|
||||||
kfree(priv->ieee_channels);
|
|
||||||
kfree(priv->ieee_rates);
|
|
||||||
clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool iwl_is_channel_extension(struct iwl_priv *priv,
|
static bool iwl_is_channel_extension(struct iwl_priv *priv,
|
||||||
enum ieee80211_band band,
|
enum ieee80211_band band,
|
||||||
u16 channel, u8 extension_chan_offset)
|
u16 channel, u8 extension_chan_offset)
|
||||||
@ -271,255 +87,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
|
|||||||
ctx->ht.extension_chan_offset);
|
ctx->ht.extension_chan_offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
	u16 new_val;
	u16 beacon_factor;

	/*
	 * If mac80211 hasn't given us a beacon interval, program
	 * the default into the device (not checking this here
	 * would cause the adjustment below to return the maximum
	 * value, which may break PAN.)
	 */
	if (!beacon_val)
		return DEFAULT_BEACON_INTERVAL;

	/*
	 * If the beacon interval we obtained from the peer
	 * is too large, we'll have to wake up more often
	 * (and in IBSS case, we'll beacon too much)
	 *
	 * For example, if max_beacon_val is 4096, and the
	 * requested beacon interval is 7000, we'll have to
	 * use 3500 to be able to wake up on the beacons.
	 *
	 * This could badly influence beacon detection stats.
	 */

	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}
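The comment above only walks through the arithmetic in prose. A minimal standalone sketch of the same integer computation (illustrative only, not part of this patch): with max_beacon_val = 4096 and a requested interval of 7000, the factor is 2 and the programmed interval becomes 3500, so the device still wakes at every beacon time of the peer.

/* Standalone illustration of the same integer arithmetic (assumed values). */
static u16 example_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
	/* factor = floor(beacon_val / max_beacon_val) + 1 */
	u16 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	u16 new_val = beacon_val / beacon_factor;

	return new_val ? new_val : max_beacon_val;
}
/* example_adjust_beacon_interval(7000, 4096) == 3500,
 * example_adjust_beacon_interval(3000, 4096) == 3000 */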
int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = &priv->hw->conf;

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
		    !ctx->vif->bss_conf.beacon_int)) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else {
		beacon_int = iwl_adjust_beacon_interval(beacon_int,
			IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
	}

	ctx->beacon_int = beacon_int;

	tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
}

void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			   int hw_decrypt)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	if (hw_decrypt)
		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
	else
		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;

}

/* validate RXON structure is valid */
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;
	u32 errors = 0;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong narrow\n");
			errors |= BIT(0);
		}
		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong radar\n");
			errors |= BIT(1);
		}
	} else {
		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
			IWL_WARN(priv, "check 5.2G: not short slot!\n");
			errors |= BIT(2);
		}
		if (rxon->flags & RXON_FLG_CCK_MSK) {
			IWL_WARN(priv, "check 5.2G: CCK!\n");
			errors |= BIT(3);
		}
	}
	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
		IWL_WARN(priv, "mac/bssid mcast!\n");
		errors |= BIT(4);
	}

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
	    (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
		IWL_WARN(priv, "neither 1 nor 6 are basic\n");
		errors |= BIT(5);
	}

	if (le16_to_cpu(rxon->assoc_id) > 2007) {
		IWL_WARN(priv, "aid > 2007\n");
		errors |= BIT(6);
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
		IWL_WARN(priv, "CCK and short slot\n");
		errors |= BIT(7);
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
		IWL_WARN(priv, "CCK and auto detect");
		errors |= BIT(8);
	}

	if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			    RXON_FLG_TGG_PROTECT_MSK)) ==
			    RXON_FLG_TGG_PROTECT_MSK) {
		IWL_WARN(priv, "TGg but no auto-detect\n");
		errors |= BIT(9);
	}

	if (rxon->channel == 0) {
		IWL_WARN(priv, "zero channel is invalid\n");
		errors |= BIT(10);
	}

	WARN(errors, "Invalid RXON (%#x), channel %d",
	     errors, le16_to_cpu(rxon->channel));

	return errors ? -EINVAL : 0;
}

/**
 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
int iwl_full_rxon_required(struct iwl_priv *priv,
			   struct iwl_rxon_context *ctx)
{
	const struct iwl_rxon_cmd *staging = &ctx->staging;
	const struct iwl_rxon_cmd *active = &ctx->active;

#define CHK(cond)							\
	if ((cond)) {							\
		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
		return 1;						\
	}

#define CHK_NEQ(c1, c2)						\
	if ((c1) != (c2)) {					\
		IWL_DEBUG_INFO(priv, "need full RXON - "	\
			       #c1 " != " #c2 " - %d != %d\n",	\
			       (c1), (c2));			\
		return 1;					\
	}

	/* These items are only settable from the full RXON command */
	CHK(!iwl_is_associated_ctx(ctx));
	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
	CHK(compare_ether_addr(staging->wlap_bssid_addr,
			       active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
		active->ofdm_ht_triple_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
			     struct iwl_ht_config *ht_conf,
			     struct iwl_rxon_context *ctx)
@ -595,46 +162,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
		_iwl_set_rxon_ht(priv, ht_conf, ctx);
}

/* Return valid, unused, channel for a passive scan to reset the RF */
u8 iwl_get_single_channel_number(struct iwl_priv *priv,
				 enum ieee80211_band band)
{
	const struct iwl_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;
	struct iwl_rxon_context *ctx;

	if (band == IEEE80211_BAND_5GHZ) {
		min = 14;
		max = priv->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		bool busy = false;

		for_each_context(priv, ctx) {
			busy = priv->channel_info[i].channel ==
				le16_to_cpu(ctx->staging.channel);
			if (busy)
				break;
		}

		if (busy)
			continue;

		channel = priv->channel_info[i].channel;
		ch_info = iwl_get_channel_info(priv, band, channel);
		if (is_channel_valid(ch_info))
			break;
	}

	return channel;
}
/**
 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
 * @ch: requested channel as a pointer to struct ieee80211_channel
@ -828,7 +355,7 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
}
#endif

-static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
+void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
{
	unsigned int reload_msec;
	unsigned long reload_jiffies;
@ -842,7 +369,7 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
	priv->ucode_loaded = false;

	/* Set the FW error flag -- cleared on iwl_down */
-	set_bit(STATUS_FW_ERROR, &priv->shrd->status);
+	set_bit(STATUS_FW_ERROR, &priv->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
@ -1451,13 +978,6 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
	return cpu_to_le32(res);
}

-void iwl_nic_error(struct iwl_op_mode *op_mode)
-{
-	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-
-	iwlagn_fw_error(priv, false);
-}
-
void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
@ -93,18 +93,12 @@ struct iwl_lib_ops {
 * L i b *
 ***************************/

-void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
-			   int hw_decrypt);
-int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
			 struct iwl_rxon_context *ctx);
void iwl_set_flags_for_band(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    enum ieee80211_band band,
			    struct ieee80211_vif *vif);
-u8 iwl_get_single_channel_number(struct iwl_priv *priv,
-				 enum ieee80211_band band);
void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
@ -204,19 +198,10 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
			   u32 addon, u32 beacon_interval);

-/*****************************************************
- *  GEOS
- ******************************************************/
-int iwl_init_geos(struct iwl_priv *priv);
-void iwl_free_geos(struct iwl_priv *priv);
-
extern void iwl_send_bt_config(struct iwl_priv *priv);
extern int iwl_send_statistics_request(struct iwl_priv *priv,
				       u8 flags, bool clear);

-int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-
static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
			struct iwl_priv *priv, enum ieee80211_band band)
{
@ -234,7 +234,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
			IWL_ERR(priv, "No uCode has been loadded.\n");
			return -EINVAL;
		}
-		img = &priv->fw->img[priv->shrd->ucode_type];
+		img = &priv->fw->img[priv->cur_ucode];
		priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
	}
	len = priv->dbgfs_sram_len;
@ -369,14 +369,19 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
				 i, station->sta.sta.addr,
				 station->sta.station_flags_msk);
		pos += scnprintf(buf + pos, bufsz - pos,
-				"TID\tseq_num\trate_n_flags\n");
+				"TID seqno next_rclmd "
+				"rate_n_flags state txq\n");

		for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
			tid_data = &priv->tid_data[i][j];
			pos += scnprintf(buf + pos, bufsz - pos,
-				"%d:\t%#x\t%#x",
+				"%d: 0x%.4x 0x%.4x 0x%.8x "
+				"%d %.2d",
				j, tid_data->seq_number,
-				tid_data->agg.rate_n_flags);
+				tid_data->next_reclaimed,
+				tid_data->agg.rate_n_flags,
+				tid_data->agg.state,
+				tid_data->agg.txq_id);

			if (tid_data->agg.wait_for_ba)
				pos += scnprintf(buf + pos, bufsz - pos,
@ -544,9 +549,9 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
		test_bit(STATUS_SCAN_HW, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
-		test_bit(STATUS_POWER_PMI, &priv->shrd->status));
+		test_bit(STATUS_POWER_PMI, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
-		test_bit(STATUS_FW_ERROR, &priv->shrd->status));
+		test_bit(STATUS_FW_ERROR, &priv->status));
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

@ -2473,6 +2478,44 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
	return count;
}

+static ssize_t iwl_dbgfs_log_event_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char *buf;
+	int pos = 0;
+	ssize_t ret = -ENOMEM;
+
+	ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true);
+	if (buf) {
+		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+		kfree(buf);
+	}
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_log_event_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	u32 event_log_flag;
+	char buf[8];
+	int buf_size;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &event_log_flag) != 1)
+		return -EFAULT;
+	if (event_log_flag == 1)
+		iwl_dump_nic_event_log(priv, true, NULL, false);
+
+	return count;
+}
+
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@ -2497,6 +2540,7 @@ DEBUGFS_READ_FILE_OPS(bt_traffic);
DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
DEBUGFS_READ_FILE_OPS(reply_tx_error);
DEBUGFS_WRITE_FILE_OPS(echo_test);
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);

/*
 * Create the debugfs files and directories
@ -2560,6 +2604,8 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
	DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
	DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
	DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
+	DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);

	if (iwl_advanced_bt_coexist(priv))
		DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);

@ -220,8 +220,7 @@ enum iwl_agg_state {
 *	Tx response (REPLY_TX), and the block ack notification
 *	(REPLY_COMPRESSED_BA).
 * @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session - used by the transport layer.
- *	Needed by the upper layer for debugfs only.
+ * @txq_id: Tx queue used by the BA session
 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
 *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
 *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@ -623,6 +622,10 @@ struct iwl_force_reset {
struct iwl_rxon_context {
	struct ieee80211_vif *vif;

+	u8 mcast_queue;
+	u8 ac_to_queue[IEEE80211_NUM_ACS];
+	u8 ac_to_fifo[IEEE80211_NUM_ACS];
+
	/*
	 * We could use the vif to indicate active, but we
	 * also need it to be active during disabling when
@ -720,6 +723,11 @@ struct iwl_priv {

	unsigned long transport_queue_stop;
	bool passive_no_rx;
+#define IWL_INVALID_AC	0xff
+	u8 queue_to_ac[IWL_MAX_HW_QUEUES];
+	atomic_t ac_stop_count[IEEE80211_NUM_ACS];
+
+	unsigned long agg_q_alloc[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* ieee device used by generic ieee processing code */
	struct ieee80211_hw *hw;
@ -731,6 +739,7 @@ struct iwl_priv {
	struct workqueue_struct *workqueue;

	enum ieee80211_band band;
+	u8 valid_contexts;

	void (*pre_rx_handler)(struct iwl_priv *priv,
			       struct iwl_rx_cmd_buffer *rxb);
@ -982,6 +991,15 @@ struct iwl_priv {
	__le64 replay_ctr;
	__le16 last_seq_ctl;
	bool have_rekey_data;
+
+	/* device_pointers: pointers to ucode event tables */
+	struct {
+		u32 error_event_table;
+		u32 log_event_table;
+	} device_pointers;
+
+	/* indicator of loaded ucode image */
+	enum iwl_ucode_type cur_ucode;
}; /*iwl_priv */

extern struct kmem_cache *iwl_tx_cmd_pool;
@ -998,7 +1016,7 @@ iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
#define for_each_context(priv, ctx)				\
	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
-		if (priv->shrd->valid_contexts & BIT(ctx->ctxid))
+		if (priv->valid_contexts & BIT(ctx->ctxid))

static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
{
@ -157,7 +157,8 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
	 */
	hw->flags |= IEEE80211_HW_SUPPORTS_PS |
-		     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+		     IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+		     IEEE80211_HW_SCAN_WHILE_IDLE;

	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
@ -437,7 +438,6 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
	unsigned long flags;
	u32 base, status = 0xffffffff;
	int ret = -EIO;
-	const struct fw_img *img;

	IWL_DEBUG_MAC80211(priv, "enter\n");
	mutex_lock(&priv->mutex);
@ -445,7 +445,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
	iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

-	base = priv->shrd->device_pointers.error_event_table;
+	base = priv->device_pointers.error_event_table;
	if (iwlagn_hw_valid_rtc_data_addr(base)) {
		spin_lock_irqsave(&trans(priv)->reg_lock, flags);
		ret = iwl_grab_nic_access_silent(trans(priv));
@ -458,6 +458,8 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (ret == 0) {
+		const struct fw_img *img;
+
		img = &(priv->fw->img[IWL_UCODE_WOWLAN]);
		if (!priv->wowlan_sram) {
			priv->wowlan_sram =
@ -653,6 +655,8 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
		ret = iwl_sta_rx_agg_stop(priv, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_START:
+		if (!trans(priv)->ops->tx_agg_setup)
+			break;
		if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
			break;
		IWL_DEBUG_HT(priv, "start Tx\n");
@ -1003,7 +1007,7 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
	int err = 0;

-	if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+	if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
		return -EOPNOTSUPP;

	if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
@ -1091,7 +1095,7 @@ static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);

-	if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+	if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
		return -EOPNOTSUPP;

	IWL_DEBUG_MAC80211(priv, "enter\n");
@ -75,21 +75,45 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt)
{
+	bool triggered = false;
+
	if (!list_empty(&notif_wait->notif_waits)) {
		struct iwl_notification_wait *w;

		spin_lock(&notif_wait->notif_wait_lock);
		list_for_each_entry(w, &notif_wait->notif_waits, list) {
-			if (w->cmd != pkt->hdr.cmd)
+			int i;
+			bool found = false;
+
+			/*
+			 * If it already finished (triggered) or has been
+			 * aborted then don't evaluate it again to avoid races,
+			 * Otherwise the function could be called again even
+			 * though it returned true before
+			 */
+			if (w->triggered || w->aborted)
				continue;

-			w->triggered = true;
-			if (w->fn)
-				w->fn(notif_wait, pkt, w->fn_data);
+			for (i = 0; i < w->n_cmds; i++) {
+				if (w->cmds[i] == pkt->hdr.cmd) {
+					found = true;
+					break;
+				}
+			}
+			if (!found)
+				continue;
+
+			if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) {
+				w->triggered = true;
+				triggered = true;
+			}
		}
		spin_unlock(&notif_wait->notif_wait_lock);
-
-		wake_up_all(&notif_wait->notif_waitq);
	}
+
+	if (triggered)
+		wake_up_all(&notif_wait->notif_waitq);
}

void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
@ -109,14 +133,18 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
void
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
			   struct iwl_notification_wait *wait_entry,
-			   u8 cmd,
-			   void (*fn)(struct iwl_notif_wait_data *notif_wait,
+			   const u8 *cmds, int n_cmds,
+			   bool (*fn)(struct iwl_notif_wait_data *notif_wait,
				      struct iwl_rx_packet *pkt, void *data),
			   void *fn_data)
{
+	if (WARN_ON(n_cmds > MAX_NOTIF_CMDS))
+		n_cmds = MAX_NOTIF_CMDS;
+
	wait_entry->fn = fn;
	wait_entry->fn_data = fn_data;
-	wait_entry->cmd = cmd;
+	wait_entry->n_cmds = n_cmds;
+	memcpy(wait_entry->cmds, cmds, n_cmds);
	wait_entry->triggered = false;
	wait_entry->aborted = false;

@ -72,11 +72,19 @@ struct iwl_notif_wait_data {
	wait_queue_head_t notif_waitq;
};

+#define MAX_NOTIF_CMDS	5
+
/**
 * struct iwl_notification_wait - notification wait entry
 * @list: list head for global list
- * @fn: function called with the notification
- * @cmd: command ID
+ * @fn: Function called with the notification. If the function
+ *	returns true, the wait is over, if it returns false then
+ *	the waiter stays blocked. If no function is given, any
+ *	of the listed commands will unblock the waiter.
+ * @cmds: command IDs
+ * @n_cmds: number of command IDs
+ * @triggered: waiter should be woken up
+ * @aborted: wait was aborted
 *
 * This structure is not used directly, to wait for a
 * notification declare it on the stack, and call
@ -93,11 +101,12 @@ struct iwl_notif_wait_data {
struct iwl_notification_wait {
	struct list_head list;

-	void (*fn)(struct iwl_notif_wait_data *notif_data,
+	bool (*fn)(struct iwl_notif_wait_data *notif_data,
		   struct iwl_rx_packet *pkt, void *data);
	void *fn_data;

-	u8 cmd;
+	u8 cmds[MAX_NOTIF_CMDS];
+	u8 n_cmds;
	bool triggered, aborted;
};

@ -112,8 +121,8 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
void __acquires(wait_entry)
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
			   struct iwl_notification_wait *wait_entry,
-			   u8 cmd,
-			   void (*fn)(struct iwl_notif_wait_data *notif_data,
+			   const u8 *cmds, int n_cmds,
+			   bool (*fn)(struct iwl_notif_wait_data *notif_data,
				      struct iwl_rx_packet *pkt, void *data),
			   void *fn_data);

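The kernel-doc above describes the new multi-command wait semantics. A minimal usage sketch (illustrative only, not taken from this patch; the command IDs and the calibration check are assumptions) could look like:

static bool example_calib_complete(struct iwl_notif_wait_data *notif_wait,
				   struct iwl_rx_packet *pkt, void *data)
{
	/* returning true ends the wait, false keeps the waiter blocked */
	return pkt->hdr.cmd == CALIBRATION_COMPLETE_NOTIFICATION;
}

static void example_wait_for_calib(struct iwl_notif_wait_data *notif_wait)
{
	struct iwl_notification_wait calib_wait;
	static const u8 calib_cmds[] = {
		CALIBRATION_RES_NOTIFICATION,
		CALIBRATION_COMPLETE_NOTIFICATION,
	};

	/* wait on several command IDs with one entry */
	iwl_init_notification_wait(notif_wait, &calib_wait,
				   calib_cmds, ARRAY_SIZE(calib_cmds),
				   example_calib_complete, NULL);
	/* ... send the calibration command, then wait/remove as usual ... */
}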
@ -111,10 +111,10 @@ struct iwl_fw;
 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
 *	HCMD the this Rx responds to.
 *	Must be atomic.
- * @queue_full: notifies that a HW queue is full. Ac is the ac of the queue
+ * @queue_full: notifies that a HW queue is full.
 *	Must be atomic
 * @queue_not_full: notifies that a HW queue is not full any more.
- *	Ac is the ac of the queue. Must be atomic
+ *	Must be atomic
 * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
 *	the radio is killed. Must be atomic.
 * @free_skb: allows the transport layer to free skbs that haven't been
@ -132,8 +132,8 @@ struct iwl_op_mode_ops {
	void (*stop)(struct iwl_op_mode *op_mode);
	int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
		  struct iwl_device_cmd *cmd);
-	void (*queue_full)(struct iwl_op_mode *op_mode, u8 ac);
-	void (*queue_not_full)(struct iwl_op_mode *op_mode, u8 ac);
+	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
+	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
	void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
	void (*nic_error)(struct iwl_op_mode *op_mode);
@ -169,15 +169,16 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
	return op_mode->ops->rx(op_mode, rxb, cmd);
}

-static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, u8 ac)
+static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
+					  int queue)
{
-	op_mode->ops->queue_full(op_mode, ac);
+	op_mode->ops->queue_full(op_mode, queue);
}

static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
-					      u8 ac)
+					      int queue)
{
-	op_mode->ops->queue_not_full(op_mode, ac);
+	op_mode->ops->queue_not_full(op_mode, queue);
}

static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
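For context, a minimal sketch of how an op_mode implementation would hook up the callbacks after this change, now keyed by HW queue number rather than AC (hypothetical example, placeholder bodies, not from this patch):

/* Hypothetical example only: shows the new 'int queue' callback signatures. */
static void example_queue_full(struct iwl_op_mode *op_mode, int queue)
{
	/* e.g. stop the mac80211 queue mapped to this HW queue */
}

static void example_queue_not_full(struct iwl_op_mode *op_mode, int queue)
{
	/* e.g. wake the mac80211 queue mapped to this HW queue */
}

static const struct iwl_op_mode_ops example_ops = {
	.queue_full	= example_queue_full,
	.queue_not_full	= example_queue_not_full,
	/* .stop, .rx, .hw_rf_kill, .free_skb, .nic_error, ... as usual */
};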
@ -60,6 +60,9 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
drivers/net/wireless/iwlwifi/iwl-phy-db.c (new file, 273 lines)
@ -0,0 +1,273 @@
/* (standard Intel dual BSD/GPLv2 license header) */
#include <linux/slab.h>
#include <linux/string.h>

#include "iwl-debug.h"
#include "iwl-shared.h"
#include "iwl-dev.h"

#include "iwl-phy-db.h"

#define CHANNEL_NUM_SIZE	4	/* num of channels in calib_ch size */

struct iwl_phy_db *iwl_phy_db_init(struct iwl_shared *shrd)
{
	struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
					    GFP_KERNEL);

	if (!phy_db)
		return phy_db;

	phy_db->shrd = shrd;

	/* TODO: add default values of the phy db. */
	return phy_db;
}

/*
 * get phy db section: returns a pointer to a phy db section specified by
 * type and channel group id.
 */
static struct iwl_phy_db_entry *
iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
		       enum iwl_phy_db_section_type type,
		       u16 chg_id)
{
	if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
		return NULL;

	switch (type) {
	case IWL_PHY_DB_CFG:
		return &phy_db->cfg;
	case IWL_PHY_DB_CALIB_NCH:
		return &phy_db->calib_nch;
	case IWL_PHY_DB_CALIB_CH:
		return &phy_db->calib_ch;
	case IWL_PHY_DB_CALIB_CHG_PAPD:
		if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
			return NULL;
		return &phy_db->calib_ch_group_papd[chg_id];
	case IWL_PHY_DB_CALIB_CHG_TXP:
		if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
			return NULL;
		return &phy_db->calib_ch_group_txp[chg_id];
	default:
		return NULL;
	}
	return NULL;
}

static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
				    enum iwl_phy_db_section_type type,
				    u16 chg_id)
{
	struct iwl_phy_db_entry *entry =
				iwl_phy_db_get_section(phy_db, type, chg_id);
	if (!entry)
		return;

	kfree(entry->data);
	entry->data = NULL;
	entry->size = 0;
}

void iwl_phy_db_free(struct iwl_phy_db *phy_db)
{
	int i;

	if (!phy_db)
		return;

	iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
	iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
	iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
	for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
		iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
	for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
		iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);

	kfree(phy_db);
}

int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
			   enum iwl_phy_db_section_type type, u8 *data,
			   u16 size, gfp_t alloc_ctx)
{
	struct iwl_phy_db_entry *entry;
	u16 chg_id = 0;

	if (!phy_db)
		return -EINVAL;

	if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWL_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16_to_cpup((__le16 *)data);

	entry = iwl_phy_db_get_section(phy_db, type, chg_id);
	if (!entry)
		return -EINVAL;

	kfree(entry->data);
	entry->data = kmemdup(data, size, alloc_ctx);
	if (!entry->data) {
		entry->size = 0;
		return -ENOMEM;
	}

	entry->size = size;

	if (type == IWL_PHY_DB_CALIB_CH) {
		phy_db->channel_num = le32_to_cpup((__le32 *)data);
		phy_db->channel_size =
			(size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
	}

	return 0;
}

static int is_valid_channel(u16 ch_id)
{
	if (ch_id <= 14 ||
	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
		return 1;
	return 0;
}

static u8 ch_id_to_ch_index(u16 ch_id)
{
	if (WARN_ON(!is_valid_channel(ch_id)))
		return 0xff;

	if (ch_id <= 14)
		return ch_id - 1;
	if (ch_id <= 64)
		return (ch_id + 20) / 4;
	if (ch_id <= 140)
		return (ch_id - 12) / 4;
	return (ch_id - 13) / 4;
}


static u16 channel_id_to_papd(u16 ch_id)
{
	if (WARN_ON(!is_valid_channel(ch_id)))
		return 0xff;

	if (1 <= ch_id && ch_id <= 14)
		return 0;
	if (36 <= ch_id && ch_id <= 64)
		return 1;
	if (100 <= ch_id && ch_id <= 140)
		return 2;
	return 3;
}

static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
{
	/* TODO David*/
	return 0;
}

int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
				enum iwl_phy_db_section_type type, u8 **data,
				u16 *size, u16 ch_id)
{
	struct iwl_phy_db_entry *entry;
	u32 channel_num;
	u32 channel_size;
	u16 ch_group_id = 0;
	u16 index;

	if (!phy_db)
		return -EINVAL;

	/* find wanted channel group */
	if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
		ch_group_id = channel_id_to_papd(ch_id);
	else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
		ch_group_id = channel_id_to_txp(phy_db, ch_id);

	entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
	if (!entry)
		return -EINVAL;

	if (type == IWL_PHY_DB_CALIB_CH) {
		index = ch_id_to_ch_index(ch_id);
		channel_num = phy_db->channel_num;
		channel_size = phy_db->channel_size;
		if (index >= channel_num) {
			IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
			return -EINVAL;
		}
		*data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
		*size = channel_size;
	} else {
		*data = entry->data;
		*size = entry->size;
	}
	return 0;
}
drivers/net/wireless/iwlwifi/iwl-phy-db.h (new file, 123 lines)
@ -0,0 +1,123 @@
/* (standard Intel dual BSD/GPLv2 license header) */
#ifndef __IWL_PHYDB_H__
#define __IWL_PHYDB_H__

#include <linux/types.h>

#define IWL_NUM_PAPD_CH_GROUPS	4
#define IWL_NUM_TXP_CH_GROUPS	8

struct iwl_phy_db_entry {
	u16	size;
	u8	*data;
};

struct iwl_shared;

/**
 * struct iwl_phy_db - stores phy configuration and calibration data.
 *
 * @cfg: phy configuration.
 * @calib_nch: non channel specific calibration data.
 * @calib_ch: channel specific calibration data.
 * @calib_ch_group_papd: calibration data related to papd channel group.
 * @calib_ch_group_txp: calibration data related to tx power chanel group.
 */
struct iwl_phy_db {
	struct iwl_phy_db_entry	cfg;
	struct iwl_phy_db_entry	calib_nch;
	struct iwl_phy_db_entry	calib_ch;
	struct iwl_phy_db_entry	calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
	struct iwl_phy_db_entry	calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];

	u32 channel_num;
	u32 channel_size;

	/* for an access to the logger */
	const struct iwl_shared *shrd;
};

enum iwl_phy_db_section_type {
	IWL_PHY_DB_CFG = 1,
	IWL_PHY_DB_CALIB_NCH,
	IWL_PHY_DB_CALIB_CH,
	IWL_PHY_DB_CALIB_CHG_PAPD,
	IWL_PHY_DB_CALIB_CHG_TXP,
	IWL_PHY_DB_MAX
};

struct iwl_phy_db *iwl_phy_db_init(struct iwl_shared *shrd);

void iwl_phy_db_free(struct iwl_phy_db *phy_db);

int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
			   enum iwl_phy_db_section_type type, u8 *data,
			   u16 size, gfp_t alloc_ctx);

int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
				enum iwl_phy_db_section_type type, u8 **data,
				u16 *size, u16 ch_id);

#endif /* __IWL_PHYDB_H__ */
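A minimal sketch of how a caller might store and later retrieve a section with the API declared above (illustrative only; the helper names, the choice of IWL_PHY_DB_CFG, and the trimmed error handling are assumptions, not part of this patch):

static int example_store_cfg(struct iwl_phy_db *phy_db, u8 *data, u16 size)
{
	/* keep a copy of the firmware-provided PHY configuration section */
	return iwl_phy_db_set_section(phy_db, IWL_PHY_DB_CFG,
				      data, size, GFP_KERNEL);
}

static int example_read_cfg(struct iwl_phy_db *phy_db)
{
	u8 *data;
	u16 size;

	if (iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CFG,
					&data, &size, 0))
		return -EINVAL;
	/* ... hand 'data'/'size' to whatever consumes the section ... */
	return 0;
}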
@ -403,12 +403,12 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
	}

	if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
-		set_bit(STATUS_POWER_PMI, &priv->shrd->status);
+		iwl_dvm_set_pmi(priv, true);

	ret = iwl_set_power(priv, cmd);
	if (!ret) {
		if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
-			clear_bit(STATUS_POWER_PMI, &priv->shrd->status);
+			iwl_dvm_set_pmi(priv, false);

		if (update_chains)
			iwl_update_chain_flags(priv);
@ -69,7 +69,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
	if (!test_bit(STATUS_READY, &priv->status) ||
	    !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
	    !test_bit(STATUS_SCAN_HW, &priv->status) ||
-	    test_bit(STATUS_FW_ERROR, &priv->shrd->status))
+	    test_bit(STATUS_FW_ERROR, &priv->status))
		return -EIO;

	ret = iwl_dvm_send_cmd(priv, &cmd);
@ -451,6 +451,46 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
	return iwl_limit_dwell(priv, passive);
}

+/* Return valid, unused, channel for a passive scan to reset the RF */
+static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
+					enum ieee80211_band band)
+{
+	const struct iwl_channel_info *ch_info;
+	int i;
+	u8 channel = 0;
+	u8 min, max;
+	struct iwl_rxon_context *ctx;
+
+	if (band == IEEE80211_BAND_5GHZ) {
+		min = 14;
+		max = priv->channel_count;
+	} else {
+		min = 0;
+		max = 14;
+	}
+
+	for (i = min; i < max; i++) {
+		bool busy = false;
+
+		for_each_context(priv, ctx) {
+			busy = priv->channel_info[i].channel ==
+				le16_to_cpu(ctx->staging.channel);
+			if (busy)
+				break;
+		}
+
+		if (busy)
+			continue;
+
+		channel = priv->channel_info[i].channel;
+		ch_info = iwl_get_channel_info(priv, band, channel);
+		if (is_channel_valid(ch_info))
+			break;
+	}
+
+	return channel;
+}
+
static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
					   struct ieee80211_vif *vif,
					   enum ieee80211_band band,
@@ -793,9 +833,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 
 	band = priv->scan_band;
 
-	if (cfg(priv)->scan_rx_antennas[band])
-		rx_ant = cfg(priv)->scan_rx_antennas[band];
-
 	if (band == IEEE80211_BAND_2GHZ &&
 	    cfg(priv)->bt_params &&
 	    cfg(priv)->bt_params->advanced_bt_coexist) {
@@ -809,8 +846,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
 	scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
 
-	/* In power save mode use one chain, otherwise use all chains */
-	if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) {
+	/*
+	 * In power save mode while associated use one chain,
+	 * otherwise use all chains
+	 */
+	if (test_bit(STATUS_POWER_PMI, &priv->status) &&
+	    !(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) {
 		/* rx_ant has been set to all valid chains previously */
 		active_chains = rx_ant &
 				((u8)(priv->chain_noise_data.active_chains));
@@ -160,7 +160,6 @@ struct iwl_mod_params {
  *
  * Holds the module parameters
  *
- * @num_ampdu_queues: num of ampdu queues
  * @tx_chains_num: Number of TX chains
  * @rx_chains_num: Number of RX chains
  * @valid_tx_ant: usable antennas for TX
@@ -176,7 +175,6 @@ struct iwl_mod_params {
  * @use_rts_for_aggregation: use rts/cts protection for HT traffic
  */
 struct iwl_hw_params {
-	u8 num_ampdu_queues;
 	u8 tx_chains_num;
 	u8 rx_chains_num;
 	u8 valid_tx_ant;
@@ -217,7 +215,6 @@ enum iwl_led_mode {
  * @chain_noise_num_beacons: number of beacons used to compute chain noise
  * @adv_thermal_throttle: support advance thermal throttle
  * @support_ct_kill_exit: support ct kill exit condition
- * @support_wimax_coexist: support wimax/wifi co-exist
  * @plcp_delta_threshold: plcp error rate threshold used to trigger
  *	radio tuning when there is a high receiving plcp error rate
  * @chain_noise_scale: default chain noise scale used for gain computation
@@ -231,7 +228,6 @@ enum iwl_led_mode {
 struct iwl_base_params {
 	int eeprom_size;
 	int num_of_queues;	/* def: HW dependent */
-	int num_of_ampdu_queues;/* def: HW dependent */
 	/* for iwl_apm_init() */
 	u32 pll_cfg_val;
 
@@ -240,7 +236,6 @@ struct iwl_base_params {
 	u16 led_compensation;
 	bool adv_thermal_throttle;
 	bool support_ct_kill_exit;
-	const bool support_wimax_coexist;
 	u8 plcp_delta_threshold;
 	s32 chain_noise_scale;
 	unsigned int wd_timeout;
@@ -299,21 +294,15 @@ struct iwl_ht_params {
  * @need_temp_offset_calib: need to perform temperature offset calibration
  * @no_xtal_calib: some devices do not need crystal calibration data,
  *	don't send it to those
- * @scan_rx_antennas: available antenna for scan operation
  * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
  * @adv_pm: advance power management
  * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
  * @internal_wimax_coex: internal wifi/wimax combo device
- * @iq_invert: I/Q inversion
  * @temp_offset_v2: support v2 of temperature offset calibration
  *
- * We enable the driver to be backward compatible wrt API version. The
- * driver specifies which APIs it supports (with @ucode_api_max being the
- * highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version.
- *
- * The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision.
+ * We enable the driver to be backward compatible wrt. hardware features.
+ * API differences in uCode shouldn't be handled here but through TLVs
+ * and/or the uCode API version instead.
  */
 struct iwl_cfg {
 	/* params specific to an individual device within a device family */
@@ -337,12 +326,10 @@ struct iwl_cfg {
 	const struct iwl_bt_params *bt_params;
 	const bool need_temp_offset_calib; /* if used set to true */
 	const bool no_xtal_calib;
-	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
 	enum iwl_led_mode led_mode;
 	const bool adv_pm;
 	const bool rx_with_siso_diversity;
 	const bool internal_wimax_coex;
-	const bool iq_invert;
 	const bool temp_offset_v2;
 };
 
@@ -351,7 +338,6 @@ struct iwl_cfg {
  *
  * @status: STATUS_*
  * @wowlan: are we running wowlan uCode
- * @valid_contexts: microcode/device supports multiple contexts
  * @bus: pointer to the bus layer data
  * @cfg: see struct iwl_cfg
  * @priv: pointer to the upper layer data
@@ -360,30 +346,18 @@ struct iwl_cfg {
  * @hw_params: see struct iwl_hw_params
  * @lock: protect general shared data
  * @eeprom: pointer to the eeprom/OTP image
- * @ucode_type: indicator of loaded ucode image
- * @device_pointers: pointers to ucode event tables
  */
 struct iwl_shared {
 	unsigned long status;
-	u8 valid_contexts;
 
 	const struct iwl_cfg *cfg;
 	struct iwl_trans *trans;
 	void *drv;
 	struct iwl_hw_params hw_params;
-	const struct iwl_fw *fw;
 
 	/* eeprom -- this is in the card's little endian byte order */
 	u8 *eeprom;
 
-	/* ucode related variables */
-	enum iwl_ucode_type ucode_type;
-
-	struct {
-		u32 error_event_table;
-		u32 log_event_table;
-	} device_pointers;
-
 };
 
 /*Whatever _m is (iwl_trans, iwl_priv, these macros will work */
|
|||||||
static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
|
static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
|
||||||
{
|
{
|
||||||
struct iwl_notification_wait calib_wait;
|
struct iwl_notification_wait calib_wait;
|
||||||
|
static const u8 calib_complete[] = {
|
||||||
|
CALIBRATION_COMPLETE_NOTIFICATION
|
||||||
|
};
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
|
iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
|
||||||
CALIBRATION_COMPLETE_NOTIFICATION,
|
calib_complete, ARRAY_SIZE(calib_complete),
|
||||||
NULL, NULL);
|
NULL, NULL);
|
||||||
ret = iwl_init_alive_start(priv);
|
ret = iwl_init_alive_start(priv);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
@ -605,11 +608,11 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
|
|||||||
IWL_ERR(priv, "No uCode has not been loaded\n");
|
IWL_ERR(priv, "No uCode has not been loaded\n");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
} else {
|
} else {
|
||||||
img = &priv->fw->img[priv->shrd->ucode_type];
|
img = &priv->fw->img[priv->cur_ucode];
|
||||||
inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
|
inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
|
||||||
data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
|
data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
|
||||||
}
|
}
|
||||||
if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->shrd->ucode_type) ||
|
if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
|
||||||
nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
|
nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
|
||||||
nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
|
nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
|
||||||
goto nla_put_failure;
|
goto nla_put_failure;
|
||||||
|
@ -136,13 +136,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
|
|||||||
return --index & (n_bd - 1);
|
return --index & (n_bd - 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* This queue number is required for proper operation
|
|
||||||
* because the ucode will stop/start the scheduler as
|
|
||||||
* required.
|
|
||||||
*/
|
|
||||||
#define IWL_IPAN_MCAST_QUEUE 8
|
|
||||||
|
|
||||||
struct iwl_cmd_meta {
|
struct iwl_cmd_meta {
|
||||||
/* only for SYNC commands, iff the reply skb is wanted */
|
/* only for SYNC commands, iff the reply skb is wanted */
|
||||||
struct iwl_host_cmd *source;
|
struct iwl_host_cmd *source;
|
||||||
@ -199,9 +192,6 @@ struct iwl_queue {
|
|||||||
* lock: queue lock
|
* lock: queue lock
|
||||||
* @time_stamp: time (in jiffies) of last read_ptr change
|
* @time_stamp: time (in jiffies) of last read_ptr change
|
||||||
* @need_update: indicates need to update read/write index
|
* @need_update: indicates need to update read/write index
|
||||||
* @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
|
|
||||||
* @sta_id: valid if sched_retry is set
|
|
||||||
* @tid: valid if sched_retry is set
|
|
||||||
*
|
*
|
||||||
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
|
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
|
||||||
* descriptors) and required locking structures.
|
* descriptors) and required locking structures.
|
||||||
@ -218,12 +208,7 @@ struct iwl_tx_queue {
|
|||||||
spinlock_t lock;
|
spinlock_t lock;
|
||||||
unsigned long time_stamp;
|
unsigned long time_stamp;
|
||||||
u8 need_update;
|
u8 need_update;
|
||||||
u8 sched_retry;
|
|
||||||
u8 active;
|
u8 active;
|
||||||
u8 swq_id;
|
|
||||||
|
|
||||||
u16 sta_id;
|
|
||||||
u16 tid;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -236,13 +221,6 @@ struct iwl_tx_queue {
  * @scd_base_addr: scheduler sram base address in SRAM
  * @scd_bc_tbls: pointer to the byte count table of the scheduler
  * @kw: keep warm address
- * @ac_to_fifo: to what fifo is a specifc AC mapped ?
- * @ac_to_queue: to what tx queue is a specifc AC mapped ?
- * @mcast_queue:
- * @txq: Tx DMA processing queues
- * @txq_ctx_active_msk: what queue is active
- * queue_stopped: tracks what queue is stopped
- * queue_stop_count: tracks what SW queue is stopped
  * @pci_dev: basic pci-network driver stuff
  * @hw_base: pci hardware address support
  * @ucode_write_complete: indicates that the ucode has been copied.
@@ -272,16 +250,9 @@ struct iwl_trans_pcie {
 	struct iwl_dma_ptr scd_bc_tbls;
 	struct iwl_dma_ptr kw;
 
-	const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
-	const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
-	u8 mcast_queue[NUM_IWL_RXON_CTX];
-	u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
-
 	struct iwl_tx_queue *txq;
-	unsigned long txq_ctx_active_msk;
-#define IWL_MAX_HW_QUEUES	32
+	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
 	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
-	atomic_t queue_stop_count[4];
 
 	/* PCI bus related data */
 	struct pci_dev *pci_dev;
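The per-context AC mappings, the single txq_ctx_active_msk word, and the per-AC stop counters give way to two plain bitmaps indexed by hardware queue number: one for "queue is allocated", one for "queue is flow-stopped". A small self-contained sketch of that bookkeeping pattern; this is a userspace model with invented names, not the driver's code, which uses the kernel's atomic test_and_set_bit/test_and_clear_bit on BITS_TO_LONGS-sized arrays.

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_HW_QUEUES 32

/* One bit per hardware queue: is it allocated, is it flow-stopped? */
static unsigned long queue_used;
static unsigned long queue_stopped;

static bool test_and_set(unsigned long *map, int q)
{
	bool was = *map & (1UL << q);
	*map |= 1UL << q;
	return was;
}

static bool test_and_clear(unsigned long *map, int q)
{
	bool was = *map & (1UL << q);
	*map &= ~(1UL << q);
	return was;
}

int main(void)
{
	int q = 10;

	if (test_and_set(&queue_used, q))
		printf("queue %d already used - expect issues\n", q);

	/* Flow control: stop and wake act on the same hw queue number. */
	if (!test_and_set(&queue_stopped, q))
		printf("stop hwq %d\n", q);
	if (test_and_clear(&queue_stopped, q))
		printf("wake hwq %d\n", q);

	test_and_clear(&queue_used, q);
	return 0;
}
```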
@@ -293,6 +264,8 @@ struct iwl_trans_pcie {
 	u8 cmd_queue;
 	u8 n_no_reclaim_cmds;
 	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
+	u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
+	u8 n_q_to_fifo;
 };
 
 #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@@ -331,15 +304,12 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans,
 void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 				       struct iwl_tx_queue *txq,
 				       u16 byte_cnt);
-int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
-				  int sta_id, int tid);
+void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue);
 void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
 void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 				   struct iwl_tx_queue *txq,
-				   int tx_fifo_id, int scd_retry);
-int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
-				 enum iwl_rxon_context_id ctx,
+				   int tx_fifo_id, bool active);
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
 				 int sta_id, int tid, int frame_limit, u16 ssn);
 void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 			 int index, enum dma_data_direction dma_dir);
|
|||||||
/*****************************************************
|
/*****************************************************
|
||||||
* Error handling
|
* Error handling
|
||||||
******************************************************/
|
******************************************************/
|
||||||
int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
|
|
||||||
char **buf, bool display);
|
|
||||||
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
|
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
|
||||||
void iwl_dump_csr(struct iwl_trans *trans);
|
void iwl_dump_csr(struct iwl_trans *trans);
|
||||||
|
|
||||||
@@ -388,91 +356,28 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
 	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
 }
 
-/*
- * we have 8 bits used like this:
- *
- * 7 6 5 4 3 2 1 0
- * | | | | | | | |
- * | | | | | | +-+-------- AC queue (0-3)
- * | | | | | |
- * | +-+-+-+-+------------ HW queue ID
- * |
- * +---------------------- unused
- */
-static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
-{
-	BUG_ON(ac > 3);   /* only have 2 bits */
-	BUG_ON(hwq > 31); /* only use 5 bits */
-
-	txq->swq_id = (hwq << 2) | ac;
-}
-
-static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
-{
-	return txq->swq_id & 0x3;
-}
-
 static inline void iwl_wake_queue(struct iwl_trans *trans,
 				  struct iwl_tx_queue *txq)
 {
-	u8 queue = txq->swq_id;
-	u8 ac = queue & 3;
-	u8 hwq = (queue >> 2) & 0x1f;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
-		if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
-			iwl_op_mode_queue_not_full(trans->op_mode, ac);
-			IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d",
-					    hwq, ac);
-		} else {
-			IWL_DEBUG_TX_QUEUES(trans,
-				"Don't wake hwq %d ac %d stop count %d",
-				hwq, ac,
-				atomic_read(&trans_pcie->queue_stop_count[ac]));
-		}
+	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
+		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
+		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
 	}
 }
 
 static inline void iwl_stop_queue(struct iwl_trans *trans,
 				  struct iwl_tx_queue *txq)
 {
-	u8 queue = txq->swq_id;
-	u8 ac = queue & 3;
-	u8 hwq = (queue >> 2) & 0x1f;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
-		if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
-			iwl_op_mode_queue_full(trans->op_mode, ac);
-			IWL_DEBUG_TX_QUEUES(trans,
-				"Stop hwq %d ac %d stop count %d",
-				hwq, ac,
-				atomic_read(&trans_pcie->queue_stop_count[ac]));
-		} else {
-			IWL_DEBUG_TX_QUEUES(trans,
-				"Don't stop hwq %d ac %d stop count %d",
-				hwq, ac,
-				atomic_read(&trans_pcie->queue_stop_count[ac]));
-		}
-	} else {
-		IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped",
-				    hwq);
-	}
-}
-
-static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
-					int txq_id)
-{
-	set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
-}
-
-static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
-					  int txq_id)
-{
-	clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
+	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
+		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
+		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
+	} else
+		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
+				    txq->q.id);
 }
 
 static inline int iwl_queue_used(const struct iwl_queue *q, int i)
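In short, flow control moves from the dropped 8-bit swq_id encoding (2 bits of AC plus 5 bits of hardware queue, with per-AC stop counters) to stopping and waking each hardware queue individually, and the op-mode callbacks now receive the hardware queue number directly. A compact userspace model of the old packing, shown only to make explicit what was removed; the assert bounds mirror the BUG_ON checks above.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Model of the dropped swq_id encoding: bits 0-1 carried the AC,
 * bits 2-6 the hardware queue ID, bit 7 was unused.
 */
static uint8_t pack_swq_id(uint8_t ac, uint8_t hwq)
{
	assert(ac <= 3);   /* only have 2 bits */
	assert(hwq <= 31); /* only use 5 bits */
	return (uint8_t)((hwq << 2) | ac);
}

int main(void)
{
	uint8_t id = pack_swq_id(2, 10);

	printf("ac=%u hwq=%u\n", id & 0x3, (id >> 2) & 0x1f);
	return 0;
}
```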
@@ -487,19 +392,4 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
 	return index & (q->n_window - 1);
 }
 
-#define IWL_TX_FIFO_BK		0	/* shared */
-#define IWL_TX_FIFO_BE		1
-#define IWL_TX_FIFO_VI		2	/* shared */
-#define IWL_TX_FIFO_VO		3
-#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
-#define IWL_TX_FIFO_BE_IPAN	4
-#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
-#define IWL_TX_FIFO_VO_IPAN	5
-/* re-uses the VO FIFO, uCode will properly flush/schedule */
-#define IWL_TX_FIFO_AUX		5
-#define IWL_TX_FIFO_UNUSED	-1
-
-/* AUX (TX during scan dwell) queue */
-#define IWL_AUX_QUEUE		10
-
 #endif /* __iwl_trans_int_pcie_h__ */
@@ -146,8 +146,11 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
 		q->write_actual = (q->write & ~0x7);
 		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
 	} else {
+		struct iwl_trans_pcie *trans_pcie =
+			IWL_TRANS_GET_PCIE_TRANS(trans);
+
 		/* If power-saving is in use, make sure device is awake */
-		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
+		if (test_bit(STATUS_POWER_PMI, &trans_pcie->status)) {
 			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
 
 			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
@@ -362,28 +365,36 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
 	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-	struct iwl_device_cmd *cmd;
 	unsigned long flags;
-	int len, err;
-	u16 sequence;
-	struct iwl_rx_cmd_buffer rxcb;
-	struct iwl_rx_packet *pkt;
-	bool reclaim;
-	int index, cmd_index;
+	bool page_stolen = false;
+	int max_len = PAGE_SIZE << hw_params(trans).rx_page_order;
+	u32 offset = 0;
 
 	if (WARN_ON(!rxb))
 		return;
 
-	dma_unmap_page(trans->dev, rxb->page_dma,
-		       PAGE_SIZE << hw_params(trans).rx_page_order,
-		       DMA_FROM_DEVICE);
+	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
+
+	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
+		struct iwl_rx_packet *pkt;
+		struct iwl_device_cmd *cmd;
+		u16 sequence;
+		bool reclaim;
+		int index, cmd_index, err, len;
+		struct iwl_rx_cmd_buffer rxcb = {
+			._offset = offset,
+			._page = rxb->page,
+			._page_stolen = false,
+		};
 
-	rxcb._page = rxb->page;
-	pkt = rxb_addr(&rxcb);
+		pkt = rxb_addr(&rxcb);
 
-	IWL_DEBUG_RX(trans, "%s, 0x%02x\n",
-		     get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
+			break;
+
+		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
+			rxcb._offset, get_cmd_string(pkt->hdr.cmd),
+			pkt->hdr.cmd);
 
-	len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-	len += sizeof(u32); /* account for status word */
+		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+		len += sizeof(u32); /* account for status word */
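The loop introduced above walks one RX page that may now carry several concatenated frames: each frame starts with a length/flags word, the walker advances by the frame length rounded up to the frame alignment, and an invalid-frame marker terminates the page early. A self-contained userspace model of that walk, with the layout simplified and the constant values treated as assumptions; the real parsing goes through struct iwl_rx_packet and the FH_RSCSR_* definitions.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FRAME_SIZE_MSK 0x00003fff	/* low bits carry the frame length */
#define FRAME_INVALID  0x55550000	/* terminator marker (assumed value) */
#define FRAME_ALIGN    0x40		/* frames start on 64-byte boundaries */

/* Walk a buffer of concatenated [len_n_flags][payload] frames. */
static void walk_frames(const uint8_t *buf, size_t buf_len)
{
	size_t offset = 0;

	while (offset + sizeof(uint32_t) < buf_len) {
		uint32_t len_n_flags;
		uint32_t len;

		memcpy(&len_n_flags, buf + offset, sizeof(len_n_flags));
		if (len_n_flags == FRAME_INVALID)
			break;

		len = len_n_flags & FRAME_SIZE_MSK;
		printf("frame at offset %zu, %u payload bytes\n", offset, len);

		/* account for the length word, then round up to alignment */
		len += sizeof(uint32_t);
		offset += (len + FRAME_ALIGN - 1) & ~(size_t)(FRAME_ALIGN - 1);
	}
}

int main(void)
{
	uint8_t page[4096] = { 0 };
	uint32_t first = 100, invalid = FRAME_INVALID;

	memcpy(page, &first, sizeof(first));		/* one 100-byte frame */
	memcpy(page + 0x80, &invalid, sizeof(invalid));	/* then terminator */

	walk_frames(page, sizeof(page));
	return 0;
}
```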
@@ -400,7 +411,8 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 		int i;
 
 		for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
-			if (trans_pcie->no_reclaim_cmds[i] == pkt->hdr.cmd) {
+			if (trans_pcie->no_reclaim_cmds[i] ==
+					pkt->hdr.cmd) {
 				reclaim = false;
 				break;
 			}
@@ -419,10 +431,8 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
 
 		/*
-		 * XXX: After here, we should always check rxcb._page
-		 * against NULL before touching it or its virtual
-		 * memory (pkt). Because some rx_handler might have
-		 * already taken or freed the pages.
+		 * After here, we should always check rxcb._page_stolen,
+		 * if it is true then one of the handlers took the page.
 		 */
 
 		if (reclaim) {
@@ -430,15 +440,21 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 			 * and fire off the (possibly) blocking
 			 * iwl_trans_send_cmd()
 			 * as we reclaim the driver command queue */
-			if (rxcb._page)
+			if (!rxcb._page_stolen)
 				iwl_tx_cmd_complete(trans, &rxcb, err);
 			else
 				IWL_WARN(trans, "Claim null rxb?\n");
 		}
 
-	/* page was stolen from us */
-	if (rxcb._page == NULL)
+		page_stolen |= rxcb._page_stolen;
+		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
+	}
+
+	/* page was stolen from us -- free our reference */
+	if (page_stolen) {
+		__free_pages(rxb->page, hw_params(trans).rx_page_order);
 		rxb->page = NULL;
+	}
 
 	/* Reuse the page if possible. For notification packets and
 	 * SKBs that fail to Rx correctly, add them back into the
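The bookkeeping above is a simple ownership hand-off: an RX handler that wants to keep the page marks it stolen, and once the whole page has been walked the transport drops its own reference instead of recycling the page. A minimal sketch of the same hand-off with plain reference counting; this is a userspace model with invented names, standing in for get_page()/__free_pages() in the driver.

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rx_page {
	int refcount;
	unsigned char *data;
};

/* A handler that keeps the data "steals" the page: it takes a reference. */
static bool handler_steals_page(struct rx_page *p)
{
	p->refcount++;
	return true;	/* tell the caller the page was stolen */
}

static void put_page_ref(struct rx_page *p)
{
	if (--p->refcount == 0) {
		free(p->data);
		p->data = NULL;
		printf("page freed\n");
	}
}

int main(void)
{
	struct rx_page page = { .refcount = 1, .data = malloc(4096) };
	bool stolen = false;

	/* walk frames; one handler decides to keep the page */
	stolen |= handler_steals_page(&page);

	/* transport drops its reference; the handler still holds one */
	if (stolen)
		put_page_ref(&page);

	/* later, the handler releases it and the page is finally freed */
	put_page_ref(&page);
	return 0;
}
```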
@ -520,153 +536,6 @@ static void iwl_rx_handle(struct iwl_trans *trans)
|
|||||||
iwlagn_rx_queue_restock(trans);
|
iwlagn_rx_queue_restock(trans);
|
||||||
}
|
}
|
||||||
|
|
||||||
static const char * const desc_lookup_text[] = {
|
|
||||||
"OK",
|
|
||||||
"FAIL",
|
|
||||||
"BAD_PARAM",
|
|
||||||
"BAD_CHECKSUM",
|
|
||||||
"NMI_INTERRUPT_WDG",
|
|
||||||
"SYSASSERT",
|
|
||||||
"FATAL_ERROR",
|
|
||||||
"BAD_COMMAND",
|
|
||||||
"HW_ERROR_TUNE_LOCK",
|
|
||||||
"HW_ERROR_TEMPERATURE",
|
|
||||||
"ILLEGAL_CHAN_FREQ",
|
|
||||||
"VCC_NOT_STABLE",
|
|
||||||
"FH_ERROR",
|
|
||||||
"NMI_INTERRUPT_HOST",
|
|
||||||
"NMI_INTERRUPT_ACTION_PT",
|
|
||||||
"NMI_INTERRUPT_UNKNOWN",
|
|
||||||
"UCODE_VERSION_MISMATCH",
|
|
||||||
"HW_ERROR_ABS_LOCK",
|
|
||||||
"HW_ERROR_CAL_LOCK_FAIL",
|
|
||||||
"NMI_INTERRUPT_INST_ACTION_PT",
|
|
||||||
"NMI_INTERRUPT_DATA_ACTION_PT",
|
|
||||||
"NMI_TRM_HW_ER",
|
|
||||||
"NMI_INTERRUPT_TRM",
|
|
||||||
"NMI_INTERRUPT_BREAK_POINT",
|
|
||||||
"DEBUG_0",
|
|
||||||
"DEBUG_1",
|
|
||||||
"DEBUG_2",
|
|
||||||
"DEBUG_3",
|
|
||||||
};
|
|
||||||
|
|
||||||
static struct { char *name; u8 num; } advanced_lookup[] = {
|
|
||||||
{ "NMI_INTERRUPT_WDG", 0x34 },
|
|
||||||
{ "SYSASSERT", 0x35 },
|
|
||||||
{ "UCODE_VERSION_MISMATCH", 0x37 },
|
|
||||||
{ "BAD_COMMAND", 0x38 },
|
|
||||||
{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
|
|
||||||
{ "FATAL_ERROR", 0x3D },
|
|
||||||
{ "NMI_TRM_HW_ERR", 0x46 },
|
|
||||||
{ "NMI_INTERRUPT_TRM", 0x4C },
|
|
||||||
{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
|
|
||||||
{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
|
|
||||||
{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
|
|
||||||
{ "NMI_INTERRUPT_HOST", 0x66 },
|
|
||||||
{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
|
|
||||||
{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
|
|
||||||
{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
|
|
||||||
{ "ADVANCED_SYSASSERT", 0 },
|
|
||||||
};
|
|
||||||
|
|
||||||
static const char *desc_lookup(u32 num)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
int max = ARRAY_SIZE(desc_lookup_text);
|
|
||||||
|
|
||||||
if (num < max)
|
|
||||||
return desc_lookup_text[num];
|
|
||||||
|
|
||||||
max = ARRAY_SIZE(advanced_lookup) - 1;
|
|
||||||
for (i = 0; i < max; i++) {
|
|
||||||
if (advanced_lookup[i].num == num)
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
return advanced_lookup[i].name;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define ERROR_START_OFFSET (1 * sizeof(u32))
|
|
||||||
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
|
|
||||||
|
|
||||||
static void iwl_dump_nic_error_log(struct iwl_trans *trans)
|
|
||||||
{
|
|
||||||
u32 base;
|
|
||||||
struct iwl_error_event_table table;
|
|
||||||
struct iwl_trans_pcie *trans_pcie =
|
|
||||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
|
||||||
|
|
||||||
base = trans->shrd->device_pointers.error_event_table;
|
|
||||||
if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
|
|
||||||
if (!base)
|
|
||||||
base = trans->shrd->fw->init_errlog_ptr;
|
|
||||||
} else {
|
|
||||||
if (!base)
|
|
||||||
base = trans->shrd->fw->inst_errlog_ptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!iwlagn_hw_valid_rtc_data_addr(base)) {
|
|
||||||
IWL_ERR(trans,
|
|
||||||
"Not valid error log pointer 0x%08X for %s uCode\n",
|
|
||||||
base,
|
|
||||||
(trans->shrd->ucode_type == IWL_UCODE_INIT)
|
|
||||||
? "Init" : "RT");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
iwl_read_targ_mem_words(trans, base, &table, sizeof(table));
|
|
||||||
|
|
||||||
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
|
|
||||||
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
|
|
||||||
IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
|
|
||||||
trans->shrd->status, table.valid);
|
|
||||||
}
|
|
||||||
|
|
||||||
trans_pcie->isr_stats.err_code = table.error_id;
|
|
||||||
|
|
||||||
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
|
|
||||||
table.data1, table.data2, table.line,
|
|
||||||
table.blink1, table.blink2, table.ilink1,
|
|
||||||
table.ilink2, table.bcon_time, table.gp1,
|
|
||||||
table.gp2, table.gp3, table.ucode_ver,
|
|
||||||
table.hw_ver, table.brd_ver);
|
|
||||||
IWL_ERR(trans, "0x%08X | %-28s\n", table.error_id,
|
|
||||||
desc_lookup(table.error_id));
|
|
||||||
IWL_ERR(trans, "0x%08X | uPc\n", table.pc);
|
|
||||||
IWL_ERR(trans, "0x%08X | branchlink1\n", table.blink1);
|
|
||||||
IWL_ERR(trans, "0x%08X | branchlink2\n", table.blink2);
|
|
||||||
IWL_ERR(trans, "0x%08X | interruptlink1\n", table.ilink1);
|
|
||||||
IWL_ERR(trans, "0x%08X | interruptlink2\n", table.ilink2);
|
|
||||||
IWL_ERR(trans, "0x%08X | data1\n", table.data1);
|
|
||||||
IWL_ERR(trans, "0x%08X | data2\n", table.data2);
|
|
||||||
IWL_ERR(trans, "0x%08X | line\n", table.line);
|
|
||||||
IWL_ERR(trans, "0x%08X | beacon time\n", table.bcon_time);
|
|
||||||
IWL_ERR(trans, "0x%08X | tsf low\n", table.tsf_low);
|
|
||||||
IWL_ERR(trans, "0x%08X | tsf hi\n", table.tsf_hi);
|
|
||||||
IWL_ERR(trans, "0x%08X | time gp1\n", table.gp1);
|
|
||||||
IWL_ERR(trans, "0x%08X | time gp2\n", table.gp2);
|
|
||||||
IWL_ERR(trans, "0x%08X | time gp3\n", table.gp3);
|
|
||||||
IWL_ERR(trans, "0x%08X | uCode version\n", table.ucode_ver);
|
|
||||||
IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
|
|
||||||
IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
|
|
||||||
IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
|
|
||||||
|
|
||||||
IWL_ERR(trans, "0x%08X | isr0\n", table.isr0);
|
|
||||||
IWL_ERR(trans, "0x%08X | isr1\n", table.isr1);
|
|
||||||
IWL_ERR(trans, "0x%08X | isr2\n", table.isr2);
|
|
||||||
IWL_ERR(trans, "0x%08X | isr3\n", table.isr3);
|
|
||||||
IWL_ERR(trans, "0x%08X | isr4\n", table.isr4);
|
|
||||||
IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref);
|
|
||||||
IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event);
|
|
||||||
IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control);
|
|
||||||
IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration);
|
|
||||||
IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
|
|
||||||
IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
|
|
||||||
IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
|
|
||||||
IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp);
|
|
||||||
IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* iwl_irq_handle_error - called for HW or SW error interrupt from card
|
* iwl_irq_handle_error - called for HW or SW error interrupt from card
|
||||||
*/
|
*/
|
||||||
@ -689,243 +558,12 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
IWL_ERR(trans, "Loaded firmware version: %s\n",
|
|
||||||
trans->shrd->fw->fw_version);
|
|
||||||
|
|
||||||
iwl_dump_nic_error_log(trans);
|
|
||||||
iwl_dump_csr(trans);
|
iwl_dump_csr(trans);
|
||||||
iwl_dump_fh(trans, NULL, false);
|
iwl_dump_fh(trans, NULL, false);
|
||||||
iwl_dump_nic_event_log(trans, false, NULL, false);
|
|
||||||
|
|
||||||
iwl_op_mode_nic_error(trans->op_mode);
|
iwl_op_mode_nic_error(trans->op_mode);
|
||||||
}
|
}
|
||||||
|
|
||||||
#define EVENT_START_OFFSET (4 * sizeof(u32))
|
|
||||||
|
|
||||||
/**
|
|
||||||
* iwl_print_event_log - Dump error event log to syslog
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
|
|
||||||
u32 num_events, u32 mode,
|
|
||||||
int pos, char **buf, size_t bufsz)
|
|
||||||
{
|
|
||||||
u32 i;
|
|
||||||
u32 base; /* SRAM byte address of event log header */
|
|
||||||
u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
|
|
||||||
u32 ptr; /* SRAM byte address of log data */
|
|
||||||
u32 ev, time, data; /* event log data */
|
|
||||||
unsigned long reg_flags;
|
|
||||||
|
|
||||||
if (num_events == 0)
|
|
||||||
return pos;
|
|
||||||
|
|
||||||
base = trans->shrd->device_pointers.log_event_table;
|
|
||||||
if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
|
|
||||||
if (!base)
|
|
||||||
base = trans->shrd->fw->init_evtlog_ptr;
|
|
||||||
} else {
|
|
||||||
if (!base)
|
|
||||||
base = trans->shrd->fw->inst_evtlog_ptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (mode == 0)
|
|
||||||
event_size = 2 * sizeof(u32);
|
|
||||||
else
|
|
||||||
event_size = 3 * sizeof(u32);
|
|
||||||
|
|
||||||
ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
|
|
||||||
|
|
||||||
/* Make sure device is powered up for SRAM reads */
|
|
||||||
spin_lock_irqsave(&trans->reg_lock, reg_flags);
|
|
||||||
if (unlikely(!iwl_grab_nic_access(trans)))
|
|
||||||
goto out_unlock;
|
|
||||||
|
|
||||||
/* Set starting address; reads will auto-increment */
|
|
||||||
iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
|
|
||||||
|
|
||||||
/* "time" is actually "data" for mode 0 (no timestamp).
|
|
||||||
* place event id # at far right for easier visual parsing. */
|
|
||||||
for (i = 0; i < num_events; i++) {
|
|
||||||
ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
|
|
||||||
time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
|
|
||||||
if (mode == 0) {
|
|
||||||
/* data, ev */
|
|
||||||
if (bufsz) {
|
|
||||||
pos += scnprintf(*buf + pos, bufsz - pos,
|
|
||||||
"EVT_LOG:0x%08x:%04u\n",
|
|
||||||
time, ev);
|
|
||||||
} else {
|
|
||||||
trace_iwlwifi_dev_ucode_event(trans->dev, 0,
|
|
||||||
time, ev);
|
|
||||||
IWL_ERR(trans, "EVT_LOG:0x%08x:%04u\n",
|
|
||||||
time, ev);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
|
|
||||||
if (bufsz) {
|
|
||||||
pos += scnprintf(*buf + pos, bufsz - pos,
|
|
||||||
"EVT_LOGT:%010u:0x%08x:%04u\n",
|
|
||||||
time, data, ev);
|
|
||||||
} else {
|
|
||||||
IWL_ERR(trans, "EVT_LOGT:%010u:0x%08x:%04u\n",
|
|
||||||
time, data, ev);
|
|
||||||
trace_iwlwifi_dev_ucode_event(trans->dev, time,
|
|
||||||
data, ev);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Allow device to power down */
|
|
||||||
iwl_release_nic_access(trans);
|
|
||||||
out_unlock:
|
|
||||||
spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
|
|
||||||
return pos;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* iwl_print_last_event_logs - Dump the newest # of event log to syslog
|
|
||||||
*/
|
|
||||||
static int iwl_print_last_event_logs(struct iwl_trans *trans, u32 capacity,
|
|
||||||
u32 num_wraps, u32 next_entry,
|
|
||||||
u32 size, u32 mode,
|
|
||||||
int pos, char **buf, size_t bufsz)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* display the newest DEFAULT_LOG_ENTRIES entries
|
|
||||||
* i.e the entries just before the next ont that uCode would fill.
|
|
||||||
*/
|
|
||||||
if (num_wraps) {
|
|
||||||
if (next_entry < size) {
|
|
||||||
pos = iwl_print_event_log(trans,
|
|
||||||
capacity - (size - next_entry),
|
|
||||||
size - next_entry, mode,
|
|
||||||
pos, buf, bufsz);
|
|
||||||
pos = iwl_print_event_log(trans, 0,
|
|
||||||
next_entry, mode,
|
|
||||||
pos, buf, bufsz);
|
|
||||||
} else
|
|
||||||
pos = iwl_print_event_log(trans, next_entry - size,
|
|
||||||
size, mode, pos, buf, bufsz);
|
|
||||||
} else {
|
|
||||||
if (next_entry < size) {
|
|
||||||
pos = iwl_print_event_log(trans, 0, next_entry,
|
|
||||||
mode, pos, buf, bufsz);
|
|
||||||
} else {
|
|
||||||
pos = iwl_print_event_log(trans, next_entry - size,
|
|
||||||
size, mode, pos, buf, bufsz);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return pos;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
|
|
||||||
|
|
||||||
int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
|
|
||||||
char **buf, bool display)
|
|
||||||
{
|
|
||||||
u32 base; /* SRAM byte address of event log header */
|
|
||||||
u32 capacity; /* event log capacity in # entries */
|
|
||||||
u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
|
|
||||||
u32 num_wraps; /* # times uCode wrapped to top of log */
|
|
||||||
u32 next_entry; /* index of next entry to be written by uCode */
|
|
||||||
u32 size; /* # entries that we'll print */
|
|
||||||
u32 logsize;
|
|
||||||
int pos = 0;
|
|
||||||
size_t bufsz = 0;
|
|
||||||
|
|
||||||
base = trans->shrd->device_pointers.log_event_table;
|
|
||||||
if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
|
|
||||||
logsize = trans->shrd->fw->init_evtlog_size;
|
|
||||||
if (!base)
|
|
||||||
base = trans->shrd->fw->init_evtlog_ptr;
|
|
||||||
} else {
|
|
||||||
logsize = trans->shrd->fw->inst_evtlog_size;
|
|
||||||
if (!base)
|
|
||||||
base = trans->shrd->fw->inst_evtlog_ptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!iwlagn_hw_valid_rtc_data_addr(base)) {
|
|
||||||
IWL_ERR(trans,
|
|
||||||
"Invalid event log pointer 0x%08X for %s uCode\n",
|
|
||||||
base,
|
|
||||||
(trans->shrd->ucode_type == IWL_UCODE_INIT)
|
|
||||||
? "Init" : "RT");
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* event log header */
|
|
||||||
capacity = iwl_read_targ_mem(trans, base);
|
|
||||||
mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
|
|
||||||
num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
|
|
||||||
next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
|
|
||||||
|
|
||||||
if (capacity > logsize) {
|
|
||||||
IWL_ERR(trans, "Log capacity %d is bogus, limit to %d "
|
|
||||||
"entries\n", capacity, logsize);
|
|
||||||
capacity = logsize;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (next_entry > logsize) {
|
|
||||||
IWL_ERR(trans, "Log write index %d is bogus, limit to %d\n",
|
|
||||||
next_entry, logsize);
|
|
||||||
next_entry = logsize;
|
|
||||||
}
|
|
||||||
|
|
||||||
size = num_wraps ? capacity : next_entry;
|
|
||||||
|
|
||||||
/* bail out if nothing in log */
|
|
||||||
if (size == 0) {
|
|
||||||
IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
|
|
||||||
return pos;
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
|
||||||
if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
|
|
||||||
size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
|
|
||||||
? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
|
|
||||||
#else
|
|
||||||
size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
|
|
||||||
? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
|
|
||||||
#endif
|
|
||||||
IWL_ERR(trans, "Start IWL Event Log Dump: display last %u entries\n",
|
|
||||||
size);
|
|
||||||
|
|
||||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
|
||||||
if (display) {
|
|
||||||
if (full_log)
|
|
||||||
bufsz = capacity * 48;
|
|
||||||
else
|
|
||||||
bufsz = size * 48;
|
|
||||||
*buf = kmalloc(bufsz, GFP_KERNEL);
|
|
||||||
if (!*buf)
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
|
|
||||||
/*
|
|
||||||
* if uCode has wrapped back to top of log,
|
|
||||||
* start at the oldest entry,
|
|
||||||
* i.e the next one that uCode would fill.
|
|
||||||
*/
|
|
||||||
if (num_wraps)
|
|
||||||
pos = iwl_print_event_log(trans, next_entry,
|
|
||||||
capacity - next_entry, mode,
|
|
||||||
pos, buf, bufsz);
|
|
||||||
/* (then/else) start at top of log */
|
|
||||||
pos = iwl_print_event_log(trans, 0,
|
|
||||||
next_entry, mode, pos, buf, bufsz);
|
|
||||||
} else
|
|
||||||
pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
|
|
||||||
next_entry, size, mode,
|
|
||||||
pos, buf, bufsz);
|
|
||||||
#else
|
|
||||||
pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
|
|
||||||
next_entry, size, mode,
|
|
||||||
pos, buf, bufsz);
|
|
||||||
#endif
|
|
||||||
return pos;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* tasklet for iwlagn interrupt */
|
/* tasklet for iwlagn interrupt */
|
||||||
void iwl_irq_tasklet(struct iwl_trans *trans)
|
void iwl_irq_tasklet(struct iwl_trans *trans)
|
||||||
{
|
{
|
||||||
@@ -963,7 +601,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 	if (iwl_have_debug_level(IWL_DL_ISR)) {
 		/* just for debug */
 		inta_mask = iwl_read32(trans, CSR_INT_MASK);
-		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ",
+		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
 			      inta, inta_mask);
 	}
 #endif
@ -41,43 +41,6 @@
|
|||||||
#define IWL_TX_CRC_SIZE 4
|
#define IWL_TX_CRC_SIZE 4
|
||||||
#define IWL_TX_DELIMITER_SIZE 4
|
#define IWL_TX_DELIMITER_SIZE 4
|
||||||
|
|
||||||
/*
|
|
||||||
* mac80211 queues, ACs, hardware queues, FIFOs.
|
|
||||||
*
|
|
||||||
* Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
|
|
||||||
*
|
|
||||||
* Mac80211 uses the following numbers, which we get as from it
|
|
||||||
* by way of skb_get_queue_mapping(skb):
|
|
||||||
*
|
|
||||||
* VO 0
|
|
||||||
* VI 1
|
|
||||||
* BE 2
|
|
||||||
* BK 3
|
|
||||||
*
|
|
||||||
*
|
|
||||||
* Regular (not A-MPDU) frames are put into hardware queues corresponding
|
|
||||||
* to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
|
|
||||||
* own queue per aggregation session (RA/TID combination), such queues are
|
|
||||||
* set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
|
|
||||||
* order to map frames to the right queue, we also need an AC->hw queue
|
|
||||||
* mapping. This is implemented here.
|
|
||||||
*
|
|
||||||
* Due to the way hw queues are set up (by the hw specific code), the AC->hw
|
|
||||||
* queue mapping is the identity mapping.
|
|
||||||
*/
|
|
||||||
|
|
||||||
static const u8 tid_to_ac[] = {
|
|
||||||
IEEE80211_AC_BE,
|
|
||||||
IEEE80211_AC_BK,
|
|
||||||
IEEE80211_AC_BK,
|
|
||||||
IEEE80211_AC_BE,
|
|
||||||
IEEE80211_AC_VI,
|
|
||||||
IEEE80211_AC_VI,
|
|
||||||
IEEE80211_AC_VO,
|
|
||||||
IEEE80211_AC_VO
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
|
* iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
|
||||||
*/
|
*/
|
||||||
@@ -141,8 +104,10 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
 		iwl_write32(trans, HBUS_TARG_WRPTR,
 			    txq->q.write_ptr | (txq_id << 8));
 	} else {
+		struct iwl_trans_pcie *trans_pcie =
+			IWL_TRANS_GET_PCIE_TRANS(trans);
 		/* if we're trying to save power */
-		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
+		if (test_bit(STATUS_POWER_PMI, &trans_pcie->status)) {
 			/* wake up nic if it's powered down ...
 			 * uCode will wake up, and interrupt us again, so next
 			 * time we'll skip this part. */
@@ -448,7 +413,7 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
 void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
 			   int txq_id, u32 index)
 {
-	IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
+	IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
 	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
 			   (index & 0xff) | (txq_id << 8));
 	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
@@ -456,12 +421,9 @@ void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
 
 void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 				   struct iwl_tx_queue *txq,
-				   int tx_fifo_id, int scd_retry)
+				   int tx_fifo_id, bool active)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int txq_id = txq->q.id;
-	int active =
-		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
 
 	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
 		       (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
@ -469,77 +431,22 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
|
|||||||
(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
|
(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
|
||||||
SCD_QUEUE_STTS_REG_MSK);
|
SCD_QUEUE_STTS_REG_MSK);
|
||||||
|
|
||||||
txq->sched_retry = scd_retry;
|
|
||||||
|
|
||||||
if (active)
|
if (active)
|
||||||
IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
|
IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
|
||||||
scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
|
txq_id, tx_fifo_id);
|
||||||
else
|
else
|
||||||
IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
|
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
|
||||||
scd_retry ? "BA" : "AC/CMD", txq_id);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int get_ac_from_tid(u16 tid)
|
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
|
||||||
|
int sta_id, int tid, int frame_limit, u16 ssn)
|
||||||
{
|
{
|
||||||
if (likely(tid < ARRAY_SIZE(tid_to_ac)))
|
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||||
return tid_to_ac[tid];
|
|
||||||
|
|
||||||
/* no support for TIDs 8-15 yet */
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
|
|
||||||
u8 ctx, u16 tid)
|
|
||||||
{
|
|
||||||
const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
|
|
||||||
if (likely(tid < ARRAY_SIZE(tid_to_ac)))
|
|
||||||
return ac_to_fifo[tid_to_ac[tid]];
|
|
||||||
|
|
||||||
/* no support for TIDs 8-15 yet */
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
|
|
||||||
{
|
|
||||||
if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
|
|
||||||
return false;
|
|
||||||
return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
|
|
||||||
hw_params(trans).num_ampdu_queues);
|
|
||||||
}
|
|
||||||
|
|
||||||
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
|
||||||
enum iwl_rxon_context_id ctx, int sta_id,
|
|
||||||
int tid, int frame_limit, u16 ssn)
|
|
||||||
{
|
|
||||||
int tx_fifo, txq_id;
|
|
||||||
u16 ra_tid;
|
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
u16 ra_tid = BUILD_RAxTID(sta_id, tid);
|
||||||
|
|
||||||
struct iwl_trans_pcie *trans_pcie =
|
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
|
||||||
IWL_TRANS_GET_PCIE_TRANS(trans);
|
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
|
||||||
|
|
||||||
if (WARN_ON(sta_id == IWL_INVALID_STATION))
|
|
||||||
return;
|
|
||||||
if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
|
|
||||||
return;
|
|
||||||
|
|
||||||
tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
|
|
||||||
if (WARN_ON(tx_fifo < 0)) {
|
|
||||||
IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
txq_id = trans_pcie->agg_txq[sta_id][tid];
|
|
||||||
if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
|
|
||||||
IWL_ERR(trans,
|
|
||||||
"queue number out of range: %d, must be %d to %d\n",
|
|
||||||
txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
|
|
||||||
IWLAGN_FIRST_AMPDU_QUEUE +
|
|
||||||
hw_params(trans).num_ampdu_queues - 1);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
ra_tid = BUILD_RAxTID(sta_id, tid);
|
|
||||||
|
|
||||||
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
|
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
|
||||||
|
|
||||||
@ -550,10 +457,10 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
|
|||||||
iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
|
iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
|
||||||
|
|
||||||
/* Set this queue as a chain-building queue */
|
/* Set this queue as a chain-building queue */
|
||||||
iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id));
|
iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
|
||||||
|
|
||||||
/* enable aggregations for the queue */
|
	/* enable aggregations for the queue */
-	iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id));
+	iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
@@ -563,92 +470,42 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
-			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
-			sizeof(u32),
-			((frame_limit <<
-			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
-			((frame_limit <<
-			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
-					tx_fifo, 1);
+					fifo, true);

-	trans_pcie->txq[txq_id].sta_id = sta_id;
-	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
- */
-static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
+void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int txq_id;

-	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
-	     txq_id++)
-		if (!test_and_set_bit(txq_id,
-				      &trans_pcie->txq_ctx_active_msk))
-			return txq_id;
-	return -1;
-}
-
-int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
-				int sta_id, int tid)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int txq_id;
-
-	txq_id = iwlagn_txq_ctx_activate_free(trans);
-	if (txq_id == -1) {
-		IWL_ERR(trans, "No free aggregation queue available\n");
-		return -ENXIO;
-	}
-
-	trans_pcie->agg_txq[sta_id][tid] = txq_id;
-	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
-
-	return 0;
-}
-
-int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u8 txq_id = trans_pcie->agg_txq[sta_id][tid];
-
-	if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
-		IWL_ERR(trans,
-			"queue number out of range: %d, must be %d to %d\n",
-			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
-			IWLAGN_FIRST_AMPDU_QUEUE +
-			hw_params(trans).num_ampdu_queues - 1);
-		return -EINVAL;
+	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
+		WARN_ONCE(1, "queue %d not used", txq_id);
+		return;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

-	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));
+	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));

-	trans_pcie->agg_txq[sta_id][tid] = 0;
	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
-	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

-	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
+	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id));
-	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
-	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
-	return 0;
+	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
+				      0, false);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
@@ -681,11 +538,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
	int trace_idx;
#endif

-	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
-		IWL_WARN(trans, "fw recovery, no hcmd send\n");
-		return -EIO;
-	}
-
	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

@@ -966,12 +818,6 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

-	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
-		IWL_ERR(trans, "Command %s failed: FW Error\n",
-			get_cmd_string(cmd->id));
-		return -EIO;
-	}
-
	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans->shrd->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
@@ -180,7 +180,6 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
			FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
-			FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			rb_size|
			(rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
@@ -377,14 +376,6 @@ static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

-	/*
-	 * For the default queues 0-3, set up the swq_id
-	 * already -- all others need to get one later
-	 * (if they need one at all).
-	 */
-	if (txq_id < 4)
-		iwl_set_swq_id(txq, txq_id, txq_id);
-
	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
@@ -895,59 +886,6 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
	return ret;
}

-#define IWL_AC_UNSET -1
-
-struct queue_to_fifo_ac {
-	s8 fifo, ac;
-};
-
-static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
-	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
-	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
-	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
-	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
-	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-};
-
-static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
-	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
-	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
-	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
-	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
-	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
-	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
-	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
-	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
-	{ IWL_TX_FIFO_BE_IPAN, 2, },
-	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
-};
-
-static const u8 iwlagn_bss_ac_to_fifo[] = {
-	IWL_TX_FIFO_VO,
-	IWL_TX_FIFO_VI,
-	IWL_TX_FIFO_BE,
-	IWL_TX_FIFO_BK,
-};
-static const u8 iwlagn_bss_ac_to_queue[] = {
-	0, 1, 2, 3,
-};
-static const u8 iwlagn_pan_ac_to_fifo[] = {
-	IWL_TX_FIFO_VO_IPAN,
-	IWL_TX_FIFO_VI_IPAN,
-	IWL_TX_FIFO_BE_IPAN,
-	IWL_TX_FIFO_BK_IPAN,
-};
-static const u8 iwlagn_pan_ac_to_queue[] = {
-	7, 6, 5, 4,
-};
-
/*
 * ucode
 */
@@ -1028,19 +966,8 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	int ret;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;

-	trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
-	trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
-
-	trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
-	trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;
-
-	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
-	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
-
	/* This may fail if AMT took ownership of the device */
	if (iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
@@ -1098,9 +1025,7 @@ static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)

static void iwl_tx_start(struct iwl_trans *trans)
{
-	const struct queue_to_fifo_ac *queue_to_fifo;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
@@ -1166,41 +1091,19 @@ static void iwl_tx_start(struct iwl_trans *trans)
	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

-	/* map queues to FIFOs */
-	if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
-		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
-	else
-		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
-
	iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);

-	/* make sure all queue are not stopped */
-	memset(&trans_pcie->queue_stopped[0], 0,
-	       sizeof(trans_pcie->queue_stopped));
-	for (i = 0; i < 4; i++)
-		atomic_set(&trans_pcie->queue_stop_count[i], 0);
+	/* make sure all queue are not stopped/used */
+	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

-	/* reset to 0 to enable all the queue first */
-	trans_pcie->txq_ctx_active_msk = 0;
+	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
+		int fifo = trans_pcie->setup_q_to_fifo[i];

-	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
-						IWLAGN_FIRST_AMPDU_QUEUE);
-	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
-						IWLAGN_FIRST_AMPDU_QUEUE);
-
-	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
-		int fifo = queue_to_fifo[i].fifo;
-		int ac = queue_to_fifo[i].ac;
-
-		iwl_txq_ctx_activate(trans_pcie, i);
-
-		if (fifo == IWL_TX_FIFO_UNUSED)
-			continue;
-
-		if (ac != IWL_AC_UNSET)
-			iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
+		set_bit(i, trans_pcie->queue_used);

		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
-					      fifo, 0);
+					      fifo, true);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
@@ -1325,70 +1228,32 @@ static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
}

static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
-		u8 sta_id, u8 tid)
+		struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
-	u8 txq_id;
-	bool is_agg = false;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

-	/*
-	 * Send this frame after DTIM -- there's a special queue
-	 * reserved for this for contexts that support AP mode.
-	 */
-	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
-		txq_id = trans_pcie->mcast_queue[ctx];
-
-		/*
-		 * The microcode will clear the more data
-		 * bit in the last frame it transmits.
-		 */
-		hdr->frame_control |=
-			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
-		txq_id = IWL_AUX_QUEUE;
-	else
-		txq_id =
-			trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
-
-	/* aggregation is on for this <sta,tid> */
-	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
-		WARN_ON(tid >= IWL_MAX_TID_COUNT);
-		txq_id = trans_pcie->agg_txq[sta_id][tid];
-		is_agg = true;
-	}
-
	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

-	spin_lock(&txq->lock);
+	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}

-	/* In AGG mode, the index in the ring must correspond to the WiFi
-	 * sequence number. This is a HW requirements to help the SCD to parse
-	 * the BA.
-	 * Check here that the packets are in the right place on the ring.
-	 */
-#ifdef CONFIG_IWLWIFI_DEBUG
-	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
-	WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
-		  "Q: %d WiFi Seq %d tfdNum %d",
-		  txq_id, wifi_seq, q->write_ptr);
-#endif
+	spin_lock(&txq->lock);

	/* Set up driver data for this TFD */
	txq->skbs[q->write_ptr] = skb;
@@ -1565,8 +1430,8 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans)
	iwl_enable_rfkill_int(trans);
}

-static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
-		      int txq_id, int ssn, struct sk_buff_head *skbs)
+static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+				   struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
@@ -1578,33 +1443,15 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,

	txq->time_stamp = jiffies;

-	if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
-		     tid != IWL_TID_NON_QOS &&
-		     txq_id != trans_pcie->agg_txq[sta_id][tid])) {
-		/*
-		 * FIXME: this is a uCode bug which need to be addressed,
-		 * log the information and return for now.
-		 * Since it is can possibly happen very often and in order
-		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
-		 */
-		IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
-			"agg_txq[sta_id[tid] %d", txq_id,
-			trans_pcie->agg_txq[sta_id][tid]);
-		spin_unlock(&txq->lock);
-		return 1;
-	}
-
	if (txq->q.read_ptr != tfd_num) {
-		IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
-			txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
-			tfd_num, ssn);
+		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
+				   txq_id, txq->q.read_ptr, tfd_num, ssn);
		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}

	spin_unlock(&txq->lock);
-	return 0;
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1635,6 +1482,17 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
+
+	trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
+
+	if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
+		trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
+
+	/* at least the command queue must be mapped */
+	WARN_ON(!trans_pcie->n_q_to_fifo);
+
+	memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
+	       trans_pcie->n_q_to_fifo * sizeof(u8));
}

static void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -1660,6 +1518,16 @@ static void iwl_trans_pcie_free(struct iwl_trans *trans)
	kfree(trans);
}

+static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (state)
+		set_bit(STATUS_POWER_PMI, &trans_pcie->status);
+	else
+		clear_bit(STATUS_POWER_PMI, &trans_pcie->status);
+}
+
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
@@ -1952,18 +1820,10 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
-				"hwq %.2d: read=%u write=%u stop=%d"
-				" swq_id=%#.2x (ac %d/hwq %d)\n",
+				"hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				cnt, q->read_ptr, q->write_ptr,
-				!!test_bit(cnt, trans_pcie->queue_stopped),
-				txq->swq_id, txq->swq_id & 3,
-				(txq->swq_id >> 2) & 0x1f);
-		if (cnt >= 4)
-			continue;
-		/* for the ACs, display the stop count too */
-		pos += scnprintf(buf + pos, bufsz - pos,
-				" stop-count: %d\n",
-				atomic_read(&trans_pcie->queue_stop_count[cnt]));
+				!!test_bit(cnt, trans_pcie->queue_used),
+				!!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
@@ -1997,44 +1857,6 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

-static ssize_t iwl_dbgfs_log_event_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct iwl_trans *trans = file->private_data;
-	char *buf;
-	int pos = 0;
-	ssize_t ret = -ENOMEM;
-
-	ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
-	if (buf) {
-		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-		kfree(buf);
-	}
-	return ret;
-}
-
-static ssize_t iwl_dbgfs_log_event_write(struct file *file,
-					const char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct iwl_trans *trans = file->private_data;
-	u32 event_log_flag;
-	char buf[8];
-	int buf_size;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) - 1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%d", &event_log_flag) != 1)
-		return -EFAULT;
-	if (event_log_flag == 1)
-		iwl_dump_nic_event_log(trans, true, NULL, false);
-
-	return count;
-}
-
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {
@@ -2161,7 +1983,6 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
	return ret;
}

-DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
@@ -2177,7 +1998,6 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
-	DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
@@ -2205,7 +2025,6 @@ const struct iwl_trans_ops trans_ops_pcie = {
	.reclaim = iwl_trans_pcie_reclaim,

	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
-	.tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,

	.free = iwl_trans_pcie_free,
@@ -2223,6 +2042,7 @@ const struct iwl_trans_ops trans_ops_pcie = {
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.configure = iwl_trans_pcie_configure,
+	.set_pmi = iwl_trans_pcie_set_pmi,
};

struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
@@ -162,6 +162,8 @@ struct iwl_cmd_header {


#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
+#define FH_RSCSR_FRAME_INVALID		0x55550000
+#define FH_RSCSR_FRAME_ALIGN		0x40

struct iwl_rx_packet {
	/*
@@ -260,27 +262,42 @@ static inline void iwl_free_resp(struct iwl_host_cmd *cmd)

struct iwl_rx_cmd_buffer {
	struct page *_page;
+	int _offset;
+	bool _page_stolen;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
-	return page_address(r->_page);
+	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
+}
+
+static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
+{
+	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
-	struct page *p = r->_page;
-	r->_page = NULL;
-	return p;
+	r->_page_stolen = true;
+	get_page(r->_page);
+	return r->_page;
}

#define MAX_NO_RECLAIM_CMDS	6

+/*
+ * Maximum number of HW queues the transport layer
+ * currently supports
+ */
+#define IWL_MAX_HW_QUEUES	32
+
/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
- *	Must be set before any other call.
+ * @queue_to_fifo: queue to FIFO mapping to set up by
+ *	default
+ * @n_queue_to_fifo: number of queues to set up
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @no_reclaim_cmds: Some devices erroneously don't set the
@@ -291,6 +308,9 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;
+	const u8 *queue_to_fifo;
+	u8 n_queue_to_fifo;
+
	u8 cmd_queue;
	const u8 *no_reclaim_cmds;
	int n_no_reclaim_cmds;
@@ -322,8 +342,6 @@ struct iwl_trans_config {
 *	Must be atomic
 * @reclaim: free packet until ssn. Returns a list of freed packets.
 *	Must be atomic
- * @tx_agg_alloc: allocate resources for a TX BA session
- *	Must be atomic
 * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
 *	ready and a successful ADDBA response has been received.
 *	May sleep
@@ -346,6 +364,7 @@ struct iwl_trans_config {
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
+ * @set_pmi: set the power pmi state
 */
struct iwl_trans_ops {

@@ -360,18 +379,13 @@ struct iwl_trans_ops {
	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
-		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
-		u8 sta_id, u8 tid);
-	int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
-			int txq_id, int ssn, struct sk_buff_head *skbs);
+		  struct iwl_device_cmd *dev_cmd, int queue);
+	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
+			struct sk_buff_head *skbs);

-	int (*tx_agg_disable)(struct iwl_trans *trans,
-			      int sta_id, int tid);
-	int (*tx_agg_alloc)(struct iwl_trans *trans,
-			    int sta_id, int tid);
-	void (*tx_agg_setup)(struct iwl_trans *trans,
-			     enum iwl_rxon_context_id ctx, int sta_id, int tid,
-			     int frame_limit, u16 ssn);
+	void (*tx_agg_setup)(struct iwl_trans *trans, int queue, int fifo,
+			     int sta_id, int tid, int frame_limit, u16 ssn);
+	void (*tx_agg_disable)(struct iwl_trans *trans, int queue);

	void (*free)(struct iwl_trans *trans);

@@ -387,6 +401,7 @@ struct iwl_trans_ops {
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
+	void (*set_pmi)(struct iwl_trans *trans, bool state);
};

/**
@@ -507,47 +522,33 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
-		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
-		u8 sta_id, u8 tid)
-{
-	if (trans->state != IWL_TRANS_FW_ALIVE)
-		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
-
-	return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid);
-}
-
-static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
-				    int tid, int txq_id, int ssn,
-				    struct sk_buff_head *skbs)
+			       struct iwl_device_cmd *dev_cmd, int queue)
{
	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		  "%s bad state = %d", __func__, trans->state);

-	return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, skbs);
+	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

-static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
-					   int sta_id, int tid)
+static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
+				     int ssn, struct sk_buff_head *skbs)
{
	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		  "%s bad state = %d", __func__, trans->state);

-	return trans->ops->tx_agg_disable(trans, sta_id, tid);
+	trans->ops->reclaim(trans, queue, ssn, skbs);
}

-static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
-					 int sta_id, int tid)
+static inline void iwl_trans_tx_agg_disable(struct iwl_trans *trans, int queue)
{
	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		  "%s bad state = %d", __func__, trans->state);

-	return trans->ops->tx_agg_alloc(trans, sta_id, tid);
+	trans->ops->tx_agg_disable(trans, queue);
}

-static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
-					  enum iwl_rxon_context_id ctx,
-					  int sta_id, int tid,
+static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue,
+					  int fifo, int sta_id, int tid,
					  int frame_limit, u16 ssn)
{
	might_sleep();
@@ -555,7 +556,8 @@ static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		  "%s bad state = %d", __func__, trans->state);

-	trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
+	trans->ops->tx_agg_setup(trans, queue, fifo, sta_id, tid,
+				 frame_limit, ssn);
}

static inline void iwl_trans_free(struct iwl_trans *trans)
@@ -611,6 +613,11 @@ static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
	return trans->ops->read32(trans, ofs);
}

+static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
+{
+	trans->ops->set_pmi(trans, state);
+}
+
/*****************************************************
* Transport layers implementations + their allocation function
******************************************************/
@@ -40,37 +40,6 @@
#include "iwl-fh.h"
#include "iwl-op-mode.h"

-static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
-	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
-	 0, COEX_UNASSOC_IDLE_FLAGS},
-	{COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
-	 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
-	{COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
-	 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
-	{COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
-	 0, COEX_CALIBRATION_FLAGS},
-	{COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
-	 0, COEX_PERIODIC_CALIBRATION_FLAGS},
-	{COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
-	 0, COEX_CONNECTION_ESTAB_FLAGS},
-	{COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
-	 0, COEX_ASSOCIATED_IDLE_FLAGS},
-	{COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
-	 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
-	{COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
-	 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
-	{COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
-	 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
-	{COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
-	{COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
-	{COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
-	 0, COEX_STAND_ALONE_DEBUG_FLAGS},
-	{COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
-	 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
-	{COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
-	{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
-};
-
/******************************************************************************
*
* uCode download functions
@@ -174,24 +143,6 @@ static int iwl_send_calib_cfg(struct iwl_priv *priv)
	return iwl_dvm_send_cmd(priv, &cmd);
}

-int iwlagn_rx_calib_result(struct iwl_priv *priv,
-			   struct iwl_rx_cmd_buffer *rxb,
-			   struct iwl_device_cmd *cmd)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->data;
-	int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-
-	/* reduce the size of the length field itself */
-	len -= 4;
-
-	if (iwl_calib_set(priv, hdr, len))
-		IWL_ERR(priv, "Failed to record calibration data %d\n",
-			hdr->op_code);
-
-	return 0;
-}
-
int iwl_init_alive_start(struct iwl_priv *priv)
{
	int ret;
@@ -233,25 +184,9 @@ static int iwl_send_wimax_coex(struct iwl_priv *priv)
{
	struct iwl_wimax_coex_cmd coex_cmd;

-	if (cfg(priv)->base_params->support_wimax_coexist) {
-		/* UnMask wake up src at associated sleep */
-		coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
-
-		/* UnMask wake up src at unassociated sleep */
-		coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
-		memcpy(coex_cmd.sta_prio, cu_priorities,
-			sizeof(struct iwl_wimax_coex_event_entry) *
-			COEX_NUM_OF_EVENTS);
-
-		/* enabling the coexistence feature */
-		coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
-
-		/* enabling the priorities tables */
-		coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
-	} else {
	/* coexistence is disabled */
	memset(&coex_cmd, 0, sizeof(coex_cmd));
-	}
	return iwl_dvm_send_cmd_pdu(priv,
				COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
				sizeof(coex_cmd), &coex_cmd);
@@ -417,9 +352,8 @@ struct iwl_alive_data {
	u8 subtype;
};

-static void iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
-			 struct iwl_rx_packet *pkt,
-			 void *data)
+static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_priv *priv =
		container_of(notif_wait, struct iwl_priv, notif_wait);
@@ -433,13 +367,15 @@ static void iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

-	priv->shrd->device_pointers.error_event_table =
+	priv->device_pointers.error_event_table =
		le32_to_cpu(palive->error_event_table_ptr);
-	priv->shrd->device_pointers.log_event_table =
+	priv->device_pointers.log_event_table =
		le32_to_cpu(palive->log_event_table_ptr);

	alive_data->subtype = palive->ver_subtype;
	alive_data->valid = palive->is_valid == UCODE_VALID_OK;
+
+	return true;
}

#define UCODE_ALIVE_TIMEOUT	HZ
@@ -453,9 +389,10 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
	const struct fw_img *fw;
	int ret;
	enum iwl_ucode_type old_type;
+	static const u8 alive_cmd[] = { REPLY_ALIVE };

-	old_type = priv->shrd->ucode_type;
-	priv->shrd->ucode_type = ucode_type;
+	old_type = priv->cur_ucode;
+	priv->cur_ucode = ucode_type;
	fw = iwl_get_ucode_image(priv, ucode_type);

	priv->ucode_loaded = false;
@@ -463,12 +400,13 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
	if (!fw)
		return -EINVAL;

-	iwl_init_notification_wait(&priv->notif_wait, &alive_wait, REPLY_ALIVE,
+	iwl_init_notification_wait(&priv->notif_wait, &alive_wait,
+				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(trans(priv), fw);
	if (ret) {
-		priv->shrd->ucode_type = old_type;
+		priv->cur_ucode = old_type;
		iwl_remove_notification(&priv->notif_wait, &alive_wait);
		return ret;
	}
@@ -480,13 +418,13 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
	ret = iwl_wait_notification(&priv->notif_wait, &alive_wait,
				    UCODE_ALIVE_TIMEOUT);
	if (ret) {
-		priv->shrd->ucode_type = old_type;
+		priv->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(priv, "Loaded ucode is not valid!\n");
-		priv->shrd->ucode_type = old_type;
+		priv->cur_ucode = old_type;
		return -EIO;
	}

@@ -498,7 +436,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
	if (ucode_type != IWL_UCODE_WOWLAN) {
		ret = iwl_verify_ucode(priv, ucode_type);
		if (ret) {
-			priv->shrd->ucode_type = old_type;
+			priv->cur_ucode = old_type;
			return ret;
		}

@@ -510,7 +448,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
	if (ret) {
		IWL_WARN(priv,
			"Could not complete ALIVE transition: %d\n", ret);
-		priv->shrd->ucode_type = old_type;
+		priv->cur_ucode = old_type;
		return ret;
	}

@@ -519,9 +457,38 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
	return 0;
}

+static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
+			      struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_priv *priv = data;
+	struct iwl_calib_hdr *hdr;
+	int len;
+
+	if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
+		WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
+		return true;
+	}
+
+	hdr = (struct iwl_calib_hdr *)pkt->data;
+	len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+	/* reduce the size by the length field itself */
+	len -= sizeof(__le32);
+
+	if (iwl_calib_set(priv, hdr, len))
+		IWL_ERR(priv, "Failed to record calibration data %d\n",
+			hdr->op_code);
+
+	return false;
+}
+
int iwl_run_init_ucode(struct iwl_priv *priv)
{
	struct iwl_notification_wait calib_wait;
+	static const u8 calib_complete[] = {
+		CALIBRATION_RES_NOTIFICATION,
+		CALIBRATION_COMPLETE_NOTIFICATION
+	};
	int ret;

	lockdep_assert_held(&priv->mutex);
@@ -534,8 +501,8 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
		return 0;

	iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
-				   CALIBRATION_COMPLETE_NOTIFICATION,
-				   NULL, NULL);
+				   calib_complete, ARRAY_SIZE(calib_complete),
+				   iwlagn_wait_calib, priv);

	/* Will also start the device */
	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
@@ -637,6 +637,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_rx_status rx_status;
+	struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);

	if (data->idle) {
		wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
@@ -671,6 +672,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
	spin_lock(&hwsim_radio_lock);
	list_for_each_entry(data2, &hwsim_radios, list) {
		struct sk_buff *nskb;
+		struct ieee80211_mgmt *mgmt;

		if (data == data2)
			continue;
@@ -688,8 +690,17 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,

		if (mac80211_hwsim_addr_match(data2, hdr->addr1))
			ack = true;

+		/* set bcn timestamp relative to receiver mactime */
		rx_status.mactime =
			le64_to_cpu(__mac80211_hwsim_get_tsf(data2));
+		mgmt = (struct ieee80211_mgmt *) nskb->data;
+		if (ieee80211_is_beacon(mgmt->frame_control) ||
+		    ieee80211_is_probe_resp(mgmt->frame_control))
+			mgmt->u.beacon.timestamp = cpu_to_le64(
+				rx_status.mactime +
+				24 * 8 * 10 / txrate->bitrate);

		memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
		ieee80211_rx_irqsafe(data2->hw, nskb);
	}
@@ -703,12 +714,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
	bool ack;
	struct ieee80211_tx_info *txi;
	u32 _pid;
-	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
-	struct mac80211_hwsim_data *data = hw->priv;
-
-	if (ieee80211_is_beacon(mgmt->frame_control) ||
-	    ieee80211_is_probe_resp(mgmt->frame_control))
-		mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data);

	mac80211_hwsim_monitor_rx(hw, skb);

@@ -805,11 +810,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = arg;
-	struct mac80211_hwsim_data *data = hw->priv;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;
	u32 _pid;
-	struct ieee80211_mgmt *mgmt;

	hwsim_check_magic(vif);

@@ -823,9 +826,6 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
		return;
	info = IEEE80211_SKB_CB(skb);

-	mgmt = (struct ieee80211_mgmt *) skb->data;
-	mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data);
-
	mac80211_hwsim_monitor_rx(hw, skb);

	/* wmediumd mode check */
@@ -1450,7 +1450,7 @@ DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
			hwsim_fops_group_read, hwsim_fops_group_write,
			"%llx\n");

-struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(
+static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(
			     struct mac_address *addr)
{
	struct mac80211_hwsim_data *data;
@@ -1795,9 +1795,11 @@ static int __init init_mac80211_hwsim(void)
		    IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_SUPPORTS_STATIC_SMPS |
		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
-		    IEEE80211_HW_AMPDU_AGGREGATION;
+		    IEEE80211_HW_AMPDU_AGGREGATION |
+		    IEEE80211_HW_WANT_MONITOR_VIF;

-	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+			    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;

	/* ask mac80211 to reserve space for magic */
	hw->vif_data_size = sizeof(struct hwsim_vif_priv);
@@ -350,25 +350,26 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
		ret_len += sizeof(struct mwifiex_ie_types_htcap);
	}

-	if (bss_desc->bcn_ht_info) {
+	if (bss_desc->bcn_ht_oper) {
		if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
			ht_info = (struct mwifiex_ie_types_htinfo *) *buffer;
			memset(ht_info, 0,
			       sizeof(struct mwifiex_ie_types_htinfo));
			ht_info->header.type =
-				cpu_to_le16(WLAN_EID_HT_INFORMATION);
+				cpu_to_le16(WLAN_EID_HT_OPERATION);
			ht_info->header.len =
-				cpu_to_le16(sizeof(struct ieee80211_ht_info));
+				cpu_to_le16(
+					sizeof(struct ieee80211_ht_operation));

			memcpy((u8 *) ht_info +
			       sizeof(struct mwifiex_ie_types_header),
-			       (u8 *) bss_desc->bcn_ht_info +
+			       (u8 *) bss_desc->bcn_ht_oper +
			       sizeof(struct ieee_types_header),
			       le16_to_cpu(ht_info->header.len));

			if (!(sband->ht_cap.cap &
			      IEEE80211_HT_CAP_SUP_WIDTH_20_40))
-				ht_info->ht_info.ht_param &=
+				ht_info->ht_oper.ht_param &=
					~(IEEE80211_HT_PARAM_CHAN_WIDTH_ANY |
					  IEEE80211_HT_PARAM_CHA_SEC_OFFSET);

@@ -385,16 +386,16 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
			sizeof(struct mwifiex_ie_types_chan_list_param_set) -
			sizeof(struct mwifiex_ie_types_header));
		chan_list->chan_scan_param[0].chan_number =
-			bss_desc->bcn_ht_info->control_chan;
+			bss_desc->bcn_ht_oper->primary_chan;
		chan_list->chan_scan_param[0].radio_type =
			mwifiex_band_to_radio_type((u8) bss_desc->bss_band);

		if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
-		    bss_desc->bcn_ht_info->ht_param &
+		    bss_desc->bcn_ht_oper->ht_param &
		    IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
			SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
					  radio_type,
-					  (bss_desc->bcn_ht_info->ht_param &
+					  (bss_desc->bcn_ht_oper->ht_param &
					   IEEE80211_HT_PARAM_CHA_SEC_OFFSET));

		*buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set);
Some files were not shown because too many files have changed in this diff.