Merge tag 'wireless-next-2024-11-13' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next

Kalle Valo says:

====================
wireless-next patches for v6.13

Most likely the last -next pull request for v6.13. Most changes are in
Realtek and Qualcomm drivers, otherwise not really anything
noteworthy.

Major changes:

mac80211
 * EHT 1024 aggregation size for transmissions

ath12k
 * switch to using wiphy_lock() and remove ar->conf_mutex
 * firmware coredump collection support
 * add debugfs support for a multitude of statistics

ath11k
 * dt: document WCN6855 hardware inputs

ath9k
 * remove include/linux/ath9k_platform.h

ath5k
 * Arcadyan ARV45XX AR2417 & Gigaset SX76[23] AR241[34]A support

rtw88
 * 8821au and 8812au USB adapters support

rtw89
 * thermal protection
 * firmware secure boot for WiFi 6 chip

* tag 'wireless-next-2024-11-13' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next: (154 commits)
  Revert "wifi: iwlegacy: do not skip frames with bad FCS"
  wifi: mac80211: pass MBSSID config by reference
  wifi: mac80211: Support EHT 1024 aggregation size in TX
  net: rfkill: gpio: Add check for clk_enable()
  wifi: brcmfmac: Fix oops due to NULL pointer dereference in brcmf_sdiod_sglist_rw()
  wifi: Switch back to struct platform_driver::remove()
  wifi: ipw2x00: libipw_rx_any(): fix bad alignment
  wifi: brcmfmac: release 'root' node in all execution paths
  wifi: iwlwifi: mvm: don't call power_update_mac in fast suspend
  wifi: iwlwifi: s/IWL_MVM_INVALID_STA/IWL_INVALID_STA
  wifi: iwlwifi: bump minimum API version in BZ/SC to 92
  wifi: iwlwifi: move IWL_LMAC_*_INDEX to fw/api/context.h
  wifi: iwlwifi: be less noisy if the NIC is dead in S3
  wifi: iwlwifi: mvm: tell iwlmei when we finished suspending
  wifi: iwlwifi: allow fast resume on ax200
  wifi: iwlwifi: mvm: support new initiator and responder command version
  wifi: iwlwifi: mvm: use wiphy locked debugfs for low-latency
  wifi: iwlwifi: mvm: MLO scan upon channel condition degradation
  wifi: iwlwifi: mvm: support new versions of the wowlan APIs
  wifi: iwlwifi: mvm: allow always calling iwl_mvm_get_bss_vif()
  ...
====================

Link: https://patch.msgid.link/20241113172918.A8A11C4CEC3@smtp.kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2024-11-13 18:35:18 -08:00
commit 5c46638540
199 changed files with 15731 additions and 2012 deletions

@ -50,6 +50,9 @@ properties:
vddrfa1p7-supply:
description: VDD_RFA_1P7 supply regulator handle
vddrfa1p8-supply:
description: VDD_RFA_1P8 supply regulator handle
vddpcie0p9-supply:
description: VDD_PCIE_0P9 supply regulator handle
@ -77,6 +80,22 @@ allOf:
- vddrfa1p7-supply
- vddpcie0p9-supply
- vddpcie1p8-supply
- if:
properties:
compatible:
contains:
const: pci17cb,1103
then:
required:
- vddrfacmn-supply
- vddaon-supply
- vddwlcx-supply
- vddwlmx-supply
- vddrfa0p8-supply
- vddrfa1p2-supply
- vddrfa1p8-supply
- vddpcie0p9-supply
- vddpcie1p8-supply
additionalProperties: false
@ -99,6 +118,16 @@ examples:
compatible = "pci17cb,1103";
reg = <0x10000 0x0 0x0 0x0 0x0>;
vddrfacmn-supply = <&vreg_pmu_rfa_cmn_0p8>;
vddaon-supply = <&vreg_pmu_aon_0p8>;
vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
vddwlmx-supply = <&vreg_pmu_wlmx_0p8>;
vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>;
qcom,ath11k-calibration-variant = "LE_X13S";
};
};

@ -837,12 +837,12 @@ static void ath10k_ahb_remove(struct platform_device *pdev)
}
static struct platform_driver ath10k_ahb_driver = {
.driver = {
.name = "ath10k_ahb",
.driver = {
.name = "ath10k_ahb",
.of_match_table = ath10k_ahb_of_match,
},
.probe = ath10k_ahb_probe,
.remove_new = ath10k_ahb_remove,
.probe = ath10k_ahb_probe,
.remove = ath10k_ahb_remove,
};
int ath10k_ahb_init(void)
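
The .remove_new to .remove rename in this hunk (and in the similar ath10k snoc and ath11k ahb hunks further down) corresponds to the shortlog entry "wifi: Switch back to struct platform_driver::remove()": .remove_new was the transitional void-returning callback, and with .remove itself now returning void the drivers move back to it. A minimal sketch of the resulting registration, reusing the names from the hunk above:

	static struct platform_driver ath10k_ahb_driver = {
		.driver = {
			.name = "ath10k_ahb",
			.of_match_table = ath10k_ahb_of_match,
		},
		.probe = ath10k_ahb_probe,
		/* prototype is void ath10k_ahb_remove(struct platform_device *pdev),
		 * as shown in the hunk header above
		 */
		.remove = ath10k_ahb_remove,
	};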

@ -6369,7 +6369,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_scan_request *req = &hw_req->req;
struct wmi_start_scan_arg arg;
struct wmi_start_scan_arg *arg = NULL;
int ret = 0;
int i;
u32 scan_timeout;
@ -6402,56 +6402,61 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
if (ret)
goto exit;
memset(&arg, 0, sizeof(arg));
ath10k_wmi_start_scan_init(ar, &arg);
arg.vdev_id = arvif->vdev_id;
arg.scan_id = ATH10K_SCAN_ID;
arg = kzalloc(sizeof(*arg), GFP_KERNEL);
if (!arg) {
ret = -ENOMEM;
goto exit;
}
ath10k_wmi_start_scan_init(ar, arg);
arg->vdev_id = arvif->vdev_id;
arg->scan_id = ATH10K_SCAN_ID;
if (req->ie_len) {
arg.ie_len = req->ie_len;
memcpy(arg.ie, req->ie, arg.ie_len);
arg->ie_len = req->ie_len;
memcpy(arg->ie, req->ie, arg->ie_len);
}
if (req->n_ssids) {
arg.n_ssids = req->n_ssids;
for (i = 0; i < arg.n_ssids; i++) {
arg.ssids[i].len = req->ssids[i].ssid_len;
arg.ssids[i].ssid = req->ssids[i].ssid;
arg->n_ssids = req->n_ssids;
for (i = 0; i < arg->n_ssids; i++) {
arg->ssids[i].len = req->ssids[i].ssid_len;
arg->ssids[i].ssid = req->ssids[i].ssid;
}
} else {
arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
arg->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
}
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ;
ether_addr_copy(arg.mac_addr.addr, req->mac_addr);
ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
arg->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ;
ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
}
if (req->n_channels) {
arg.n_channels = req->n_channels;
for (i = 0; i < arg.n_channels; i++)
arg.channels[i] = req->channels[i]->center_freq;
arg->n_channels = req->n_channels;
for (i = 0; i < arg->n_channels; i++)
arg->channels[i] = req->channels[i]->center_freq;
}
/* if duration is set, default dwell times will be overwritten */
if (req->duration) {
arg.dwell_time_active = req->duration;
arg.dwell_time_passive = req->duration;
arg.burst_duration_ms = req->duration;
arg->dwell_time_active = req->duration;
arg->dwell_time_passive = req->duration;
arg->burst_duration_ms = req->duration;
scan_timeout = min_t(u32, arg.max_rest_time *
(arg.n_channels - 1) + (req->duration +
scan_timeout = min_t(u32, arg->max_rest_time *
(arg->n_channels - 1) + (req->duration +
ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
arg.n_channels, arg.max_scan_time);
arg->n_channels, arg->max_scan_time);
} else {
scan_timeout = arg.max_scan_time;
scan_timeout = arg->max_scan_time;
}
/* Add a 200ms margin to account for event/command processing */
scan_timeout += 200;
ret = ath10k_start_scan(ar, &arg);
ret = ath10k_start_scan(ar, arg);
if (ret) {
ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
@ -6463,6 +6468,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
msecs_to_jiffies(scan_timeout));
exit:
kfree(arg);
mutex_unlock(&ar->conf_mutex);
return ret;
}
@ -7899,7 +7906,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct wmi_start_scan_arg arg;
struct wmi_start_scan_arg *arg = NULL;
int ret = 0;
u32 scan_time_msec;
@ -7936,20 +7943,25 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
memset(&arg, 0, sizeof(arg));
ath10k_wmi_start_scan_init(ar, &arg);
arg.vdev_id = arvif->vdev_id;
arg.scan_id = ATH10K_SCAN_ID;
arg.n_channels = 1;
arg.channels[0] = chan->center_freq;
arg.dwell_time_active = scan_time_msec;
arg.dwell_time_passive = scan_time_msec;
arg.max_scan_time = scan_time_msec;
arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
arg.burst_duration_ms = duration;
arg = kzalloc(sizeof(*arg), GFP_KERNEL);
if (!arg) {
ret = -ENOMEM;
goto exit;
}
ret = ath10k_start_scan(ar, &arg);
ath10k_wmi_start_scan_init(ar, arg);
arg->vdev_id = arvif->vdev_id;
arg->scan_id = ATH10K_SCAN_ID;
arg->n_channels = 1;
arg->channels[0] = chan->center_freq;
arg->dwell_time_active = scan_time_msec;
arg->dwell_time_passive = scan_time_msec;
arg->max_scan_time = scan_time_msec;
arg->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
arg->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
arg->burst_duration_ms = duration;
ret = ath10k_start_scan(ar, arg);
if (ret) {
ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
@ -7975,6 +7987,8 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
ret = 0;
exit:
kfree(arg);
mutex_unlock(&ar->conf_mutex);
return ret;
}
@ -9122,7 +9136,7 @@ static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[
{6, {2633, 2925}, {1215, 1350}, {585, 650} },
{7, {2925, 3250}, {1350, 1500}, {650, 722} },
{8, {3510, 3900}, {1620, 1800}, {780, 867} },
{9, {3900, 4333}, {1800, 2000}, {780, 867} }
{9, {3900, 4333}, {1800, 2000}, {865, 960} }
};
/*MCS parameters with Nss = 2 */
@ -9137,7 +9151,7 @@ static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[
{6, {5265, 5850}, {2430, 2700}, {1170, 1300} },
{7, {5850, 6500}, {2700, 3000}, {1300, 1444} },
{8, {7020, 7800}, {3240, 3600}, {1560, 1733} },
{9, {7800, 8667}, {3600, 4000}, {1560, 1733} }
{9, {7800, 8667}, {3600, 4000}, {1730, 1920} }
};
static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
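
The two scan hunks above move the sizeable struct wmi_start_scan_arg off the kernel stack and onto the heap, with the allocation freed on the shared exit path. A condensed sketch of the resulting flow inside ath10k_hw_scan()'s locked section (fields trimmed for brevity):

	struct wmi_start_scan_arg *arg = NULL;
	int ret = 0;

	arg = kzalloc(sizeof(*arg), GFP_KERNEL);
	if (!arg) {
		ret = -ENOMEM;
		goto exit;
	}

	ath10k_wmi_start_scan_init(ar, arg);
	arg->vdev_id = arvif->vdev_id;
	arg->scan_id = ATH10K_SCAN_ID;

	ret = ath10k_start_scan(ar, arg);
exit:
	kfree(arg);	/* kfree(NULL) is a no-op, so the -ENOMEM path needs no special casing */
	mutex_unlock(&ar->conf_mutex);
	return ret;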

@ -3,7 +3,7 @@
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -2648,9 +2648,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
netif_napi_del(&ar->napi);
ath10k_core_destroy(ar);
destroy_workqueue(ar_sdio->workqueue);
ath10k_core_destroy(ar);
}
static const struct sdio_device_id ath10k_sdio_devices[] = {

@ -1885,11 +1885,11 @@ static void ath10k_snoc_shutdown(struct platform_device *pdev)
}
static struct platform_driver ath10k_snoc_driver = {
.probe = ath10k_snoc_probe,
.remove_new = ath10k_snoc_remove,
.probe = ath10k_snoc_probe,
.remove = ath10k_snoc_remove,
.shutdown = ath10k_snoc_shutdown,
.driver = {
.name = "ath10k_snoc",
.name = "ath10k_snoc",
.of_match_table = ath10k_snoc_dt_match,
},
};

@ -1000,18 +1000,18 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
if (!ab->hw_params.fixed_fw_mem)
return 0;
ret = ath11k_ahb_setup_msa_resources(ab);
if (ret) {
ath11k_err(ab, "failed to setup msa resources\n");
return ret;
}
node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
if (!node) {
ab_ahb->fw.use_tz = true;
return 0;
}
ret = ath11k_ahb_setup_msa_resources(ab);
if (ret) {
ath11k_err(ab, "failed to setup msa resources\n");
return ret;
}
info.fwnode = &node->fwnode;
info.parent = host_dev;
info.name = node->name;
@ -1313,12 +1313,12 @@ static void ath11k_ahb_shutdown(struct platform_device *pdev)
}
static struct platform_driver ath11k_ahb_driver = {
.driver = {
.name = "ath11k",
.driver = {
.name = "ath11k",
.of_match_table = ath11k_ahb_of_match,
},
.probe = ath11k_ahb_probe,
.remove_new = ath11k_ahb_remove,
.probe = ath11k_ahb_probe,
.remove = ath11k_ahb_remove,
.shutdown = ath11k_ahb_shutdown,
};

@ -616,7 +616,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.supports_rssi_stats = true,
.fw_wmi_diag_event = false,
.fw_wmi_diag_event = true,
.current_cc_support = true,
.dbr_debug_support = false,
.global_reset = false,

@ -1351,6 +1351,7 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab)
ath11k_hal_free_cont_rdp(ab);
ath11k_hal_free_cont_wrp(ab);
kfree(hal->srng_config);
hal->srng_config = NULL;
}
EXPORT_SYMBOL(ath11k_hal_srng_deinit);

@ -2180,6 +2180,9 @@ static int ath11k_qmi_request_device_info(struct ath11k_base *ab)
ab->mem = bar_addr_va;
ab->mem_len = resp.bar_size;
if (!ab->hw_params.ce_remap)
ab->mem_ce = ab->mem;
return 0;
out:
return ret;

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@ -155,6 +155,7 @@ static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
u8 hdr_8023_bit_mask[ETH_HLEN] = {};
u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
u8 bytemask[WOW_MAX_PATTERN_SIZE] = {};
int total_len = old->pkt_offset + old->pattern_len;
int hdr_80211_end_offset;
@ -172,11 +173,17 @@ static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
struct rfc1042_hdr *new_rfc_mask =
(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
int rfc_len = sizeof(*new_rfc_pattern);
int i;
/* convert bitmask to bytemask */
for (i = 0; i < old->pattern_len; i++)
if (old->mask[i / 8] & BIT(i % 8))
bytemask[i] = 0xff;
memcpy(hdr_8023_pattern + old->pkt_offset,
old->pattern, ETH_HLEN - old->pkt_offset);
memcpy(hdr_8023_bit_mask + old->pkt_offset,
old->mask, ETH_HLEN - old->pkt_offset);
bytemask, ETH_HLEN - old->pkt_offset);
/* Copy destination address */
memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
@ -232,7 +239,7 @@ static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
(void *)old->pattern + ETH_HLEN - old->pkt_offset,
total_len - ETH_HLEN);
memcpy((u8 *)new->mask + new->pattern_len,
(void *)old->mask + ETH_HLEN - old->pkt_offset,
bytemask + ETH_HLEN - old->pkt_offset,
total_len - ETH_HLEN);
new->pattern_len += total_len - ETH_HLEN;
@ -393,35 +400,31 @@ static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
}
for (i = 0; i < wowlan->n_patterns; i++) {
u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
struct cfg80211_pkt_pattern new_pattern = {};
struct cfg80211_pkt_pattern old_pattern = patterns[i];
int j;
new_pattern.pattern = ath_pattern;
new_pattern.mask = ath_bitmask;
if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
continue;
/* convert bytemask to bitmask */
for (j = 0; j < patterns[i].pattern_len; j++)
if (patterns[i].mask[j / 8] & BIT(j % 8))
bitmask[j] = 0xff;
old_pattern.mask = bitmask;
if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
ATH11K_HW_TXRX_NATIVE_WIFI) {
if (patterns[i].pkt_offset < ETH_HLEN) {
u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {};
memcpy(pattern_ext, old_pattern.pattern,
old_pattern.pattern_len);
old_pattern.pattern = pattern_ext;
ath11k_wow_convert_8023_to_80211(&new_pattern,
&old_pattern);
&patterns[i]);
} else {
new_pattern = old_pattern;
int j;
new_pattern = patterns[i];
new_pattern.mask = ath_bitmask;
/* convert bitmask to bytemask */
for (j = 0; j < patterns[i].pattern_len; j++)
if (patterns[i].mask[j / 8] & BIT(j % 8))
ath_bitmask[j] = 0xff;
new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
}
}
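
The WoW hunks above centralize the conversion of cfg80211's packed pattern mask (one bit per covered pattern byte) into a byte-wise mask, so both the 802.3 and native-wifi paths work on whole bytes. A standalone sketch of that conversion; the helper name is illustrative rather than taken from the driver, and the WOW_MAX_PATTERN_SIZE bound is assumed to be checked by the caller:

	#include <linux/bits.h>
	#include <linux/types.h>

	/* Expand a packed bitmask (as in struct cfg80211_pkt_pattern::mask,
	 * one bit per pattern byte) into a bytemask where every covered byte
	 * becomes 0xff. Sketch only; pattern_len must already be validated.
	 */
	static void wow_bitmask_to_bytemask(const u8 *mask, u8 *bytemask,
					    int pattern_len)
	{
		int i;

		for (i = 0; i < pattern_len; i++)
			if (mask[i / 8] & BIT(i % 8))
				bytemask[i] = 0xff;
	}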

@ -42,3 +42,13 @@ config ATH12K_TRACING
If unsure, say Y to make it easier to debug problems. But if
you want optimal performance choose N.
config ATH12K_COREDUMP
bool "ath12k coredump"
depends on ATH12K
select WANT_DEV_COREDUMP
help
Enable ath12k coredump collection
If unsure, say Y to make it easier to debug problems. But if
dump collection not required choose N.

@ -27,6 +27,7 @@ ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o
ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
ath12k-$(CONFIG_PM) += wow.o
ath12k-$(CONFIG_ATH12K_COREDUMP) += coredump.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)

@ -148,7 +148,7 @@ struct ath12k_ce_pipe {
void (*send_cb)(struct ath12k_ce_pipe *pipe);
void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb);
struct tasklet_struct intr_tq;
struct work_struct intr_wq;
struct ath12k_ce_ring *src_ring;
struct ath12k_ce_ring *dest_ring;
struct ath12k_ce_ring *status_ring;

@ -1004,7 +1004,7 @@ void ath12k_core_halt(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
ar->num_created_vdevs = 0;
ar->allocated_vdev_map = 0;
@ -1078,6 +1078,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
wiphy_lock(ah->hw->wiphy);
mutex_lock(&ah->hw_mutex);
switch (ah->state) {
@ -1086,10 +1087,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
for (j = 0; j < ah->num_radio; j++) {
ar = &ah->radio[j];
mutex_lock(&ar->conf_mutex);
ath12k_core_halt(ar);
mutex_unlock(&ar->conf_mutex);
}
break;
@ -1110,6 +1108,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
}
mutex_unlock(&ah->hw_mutex);
wiphy_unlock(ah->hw->wiphy);
}
complete(&ab->driver_recovery);
@ -1188,6 +1187,7 @@ static void ath12k_core_reset(struct work_struct *work)
ab->is_reset = true;
atomic_set(&ab->recovery_count, 0);
ath12k_coredump_collect(ab);
ath12k_core_pre_reconfigure_recovery(ab);
ath12k_core_post_reconfigure_recovery(ab);
@ -1312,6 +1312,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->restart_work, ath12k_core_restart);
INIT_WORK(&ab->reset_work, ath12k_core_reset);
INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);

@ -30,6 +30,7 @@
#include "acpi.h"
#include "wow.h"
#include "debugfs_htt_stats.h"
#include "coredump.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@ -94,6 +95,14 @@ static inline enum wme_ac ath12k_tid_to_ac(u32 tid)
WME_AC_VO);
}
static inline u64 ath12k_le32hilo_to_u64(__le32 hi, __le32 lo)
{
u64 hi64 = le32_to_cpu(hi);
u64 lo64 = le32_to_cpu(lo);
return (hi64 << 32) | lo64;
}
enum ath12k_skb_flags {
ATH12K_SKB_HW_80211_ENCAP = BIT(0),
ATH12K_SKB_CIPHER_SET = BIT(1),
@ -220,8 +229,9 @@ struct ath12k_tx_conf {
};
struct ath12k_key_conf {
bool changed;
enum set_key_cmd cmd;
struct list_head list;
struct ieee80211_sta *sta;
struct ieee80211_key_conf *key;
};
@ -238,10 +248,8 @@ struct ath12k_rekey_data {
bool enable_offload;
};
struct ath12k_vif {
struct ath12k_link_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
u16 ast_hash;
@ -251,13 +259,38 @@ struct ath12k_vif {
u8 search_type;
struct ath12k *ar;
struct ieee80211_vif *vif;
int bank_id;
u8 vdev_id_check_en;
struct wmi_wmm_params_all_arg wmm_params;
struct list_head list;
bool is_created;
bool is_started;
bool is_up;
u8 bssid[ETH_ALEN];
struct cfg80211_bitrate_mask bitrate_mask;
struct delayed_work connection_loss_work;
int num_legacy_stations;
int rtscts_prot_mode;
int txpower;
bool rsnie_present;
bool wpaie_present;
struct ieee80211_chanctx_conf chanctx;
u8 vdev_stats_id;
u32 punct_bitmap;
u8 link_id;
struct ath12k_vif *ahvif;
struct ath12k_rekey_data rekey_data;
};
struct ath12k_vif {
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
struct ieee80211_vif *vif;
struct ath12k_hw *ah;
union {
struct {
u32 uapsd;
@ -275,25 +308,16 @@ struct ath12k_vif {
} ap;
} u;
bool is_created;
bool is_started;
bool is_up;
u32 aid;
u8 bssid[ETH_ALEN];
struct cfg80211_bitrate_mask bitrate_mask;
struct delayed_work connection_loss_work;
int num_legacy_stations;
int rtscts_prot_mode;
int txpower;
bool rsnie_present;
bool wpaie_present;
u32 key_cipher;
u8 tx_encap_type;
u8 vdev_stats_id;
u32 punct_bitmap;
bool ps;
struct ath12k_vif_cache *cache;
struct ath12k_rekey_data rekey_data;
struct ath12k_link_vif deflink;
struct ath12k_link_vif __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
struct ath12k_vif_cache *cache[IEEE80211_MLD_MAX_NUM_LINKS];
/* indicates bitmap of link vif created in FW */
u16 links_map;
/* Must be last - ends in a flexible-array member.
*
@ -306,7 +330,7 @@ struct ath12k_vif {
struct ath12k_vif_iter {
u32 vdev_id;
struct ath12k *ar;
struct ath12k_vif *arvif;
struct ath12k_link_vif *arvif;
};
#define HAL_AST_IDX_INVALID 0xFFFF
@ -441,27 +465,36 @@ struct ath12k_wbm_tx_stats {
u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX];
};
struct ath12k_sta {
struct ath12k_vif *arvif;
struct ath12k_link_sta {
struct ath12k_link_vif *arvif;
struct ath12k_sta *ahsta;
/* the following are protected by ar->data_lock */
u32 changed; /* IEEE80211_RC_* */
u32 bw;
u32 nss;
u32 smps;
enum hal_pn_type pn_type;
struct work_struct update_wk;
struct wiphy_work update_wk;
struct rate_info txrate;
struct rate_info last_txrate;
u64 rx_duration;
u64 tx_duration;
u8 rssi_comb;
u8 link_id;
struct ath12k_rx_peer_stats *rx_stats;
struct ath12k_wbm_tx_stats *wbm_tx_stats;
u32 bw_prev;
};
struct ath12k_sta {
enum hal_pn_type pn_type;
struct ath12k_link_sta deflink;
struct ath12k_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
/* indicates bitmap of link sta created in FW */
u16 links_map;
};
#define ATH12K_MIN_5G_FREQ 4150
#define ATH12K_MIN_6G_FREQ 5925
#define ATH12K_MAX_6G_FREQ 7115
@ -561,13 +594,9 @@ struct ath12k {
u32 chan_tx_pwr;
u32 num_stations;
u32 max_num_stations;
bool monitor_present;
/* To synchronize concurrent synchronous mac80211 callback operations,
* concurrent debugfs configuration and concurrent FW statistics events.
*/
struct mutex conf_mutex;
/* protects the radio specific data like debug stats, ppdu_stats_info stats,
* vdev_stop_status info, scan data, ath12k_sta info, ath12k_vif info,
* vdev_stop_status info, scan data, ath12k_sta info, ath12k_link_vif info,
* channel context data, survey info, test mode data.
*/
spinlock_t data_lock;
@ -666,6 +695,7 @@ struct ath12k_hw {
enum ath12k_hw_state state;
bool regd_updated;
bool use_6ghz_regd;
u8 num_radio;
/* Keep last */
@ -782,6 +812,10 @@ struct ath12k_base {
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
struct ath12k_dump_file_data *dump_data;
size_t ath12k_coredump_len;
struct work_struct dump_work;
struct ath12k_htc htc;
struct ath12k_dp dp;
@ -1024,16 +1058,26 @@ static inline struct ath12k_skb_rxcb *ATH12K_SKB_RXCB(struct sk_buff *skb)
return (struct ath12k_skb_rxcb *)skb->cb;
}
static inline struct ath12k_vif *ath12k_vif_to_arvif(struct ieee80211_vif *vif)
static inline struct ath12k_vif *ath12k_vif_to_ahvif(struct ieee80211_vif *vif)
{
return (struct ath12k_vif *)vif->drv_priv;
}
static inline struct ath12k_sta *ath12k_sta_to_arsta(struct ieee80211_sta *sta)
static inline struct ath12k_sta *ath12k_sta_to_ahsta(struct ieee80211_sta *sta)
{
return (struct ath12k_sta *)sta->drv_priv;
}
static inline struct ieee80211_sta *ath12k_ahsta_to_sta(struct ath12k_sta *ahsta)
{
return container_of((void *)ahsta, struct ieee80211_sta, drv_priv);
}
static inline struct ieee80211_vif *ath12k_ahvif_to_vif(struct ath12k_vif *ahvif)
{
return container_of((void *)ahvif, struct ieee80211_vif, drv_priv);
}
static inline struct ath12k *ath12k_ab_to_ar(struct ath12k_base *ab,
int mac_id)
{

@ -0,0 +1,51 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/devcoredump.h>
#include "hif.h"
#include "coredump.h"
#include "debug.h"
enum
ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type(enum ath12k_qmi_target_mem type)
{
enum ath12k_fw_crash_dump_type dump_type;
switch (type) {
case HOST_DDR_REGION_TYPE:
dump_type = FW_CRASH_DUMP_REMOTE_MEM_DATA;
break;
case M3_DUMP_REGION_TYPE:
dump_type = FW_CRASH_DUMP_M3_DUMP;
break;
case PAGEABLE_MEM_REGION_TYPE:
dump_type = FW_CRASH_DUMP_PAGEABLE_DATA;
break;
case BDF_MEM_REGION_TYPE:
case CALDB_MEM_REGION_TYPE:
dump_type = FW_CRASH_DUMP_NONE;
break;
default:
dump_type = FW_CRASH_DUMP_TYPE_MAX;
break;
}
return dump_type;
}
void ath12k_coredump_upload(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, dump_work);
ath12k_info(ab, "Uploading coredump\n");
/* dev_coredumpv() takes ownership of the buffer */
dev_coredumpv(ab->dev, ab->dump_data, ab->ath12k_coredump_len, GFP_KERNEL);
ab->dump_data = NULL;
}
void ath12k_coredump_collect(struct ath12k_base *ab)
{
ath12k_hif_coredump_download(ab);
}

@ -0,0 +1,80 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_COREDUMP_H_
#define _ATH12K_COREDUMP_H_
#define ATH12K_FW_CRASH_DUMP_V2 2
enum ath12k_fw_crash_dump_type {
FW_CRASH_DUMP_PAGING_DATA,
FW_CRASH_DUMP_RDDM_DATA,
FW_CRASH_DUMP_REMOTE_MEM_DATA,
FW_CRASH_DUMP_PAGEABLE_DATA,
FW_CRASH_DUMP_M3_DUMP,
FW_CRASH_DUMP_NONE,
/* keep last */
FW_CRASH_DUMP_TYPE_MAX,
};
#define COREDUMP_TLV_HDR_SIZE 8
struct ath12k_tlv_dump_data {
/* see ath11k_fw_crash_dump_type above */
__le32 type;
/* in bytes */
__le32 tlv_len;
/* pad to 32-bit boundaries as needed */
u8 tlv_data[];
} __packed;
struct ath12k_dump_file_data {
/* "ATH12K-FW-DUMP" */
char df_magic[16];
/* total dump len in bytes */
__le32 len;
/* file dump version */
__le32 version;
/* pci device id */
__le32 chip_id;
/* qrtr instance id */
__le32 qrtr_id;
/* pci domain id */
__le32 bus_id;
guid_t guid;
/* time-of-day stamp */
__le64 tv_sec;
/* time-of-day stamp, nano-seconds */
__le64 tv_nsec;
/* room for growth w/out changing binary format */
u8 unused[128];
u8 data[];
} __packed;
#ifdef CONFIG_ATH12K_COREDUMP
enum ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type
(enum ath12k_qmi_target_mem type);
void ath12k_coredump_upload(struct work_struct *work);
void ath12k_coredump_collect(struct ath12k_base *ab);
#else
static inline enum ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type
(enum ath12k_qmi_target_mem type)
{
return FW_CRASH_DUMP_TYPE_MAX;
}
static inline void ath12k_coredump_upload(struct work_struct *work)
{
}
static inline void ath12k_coredump_collect(struct ath12k_base *ab)
{
}
#endif
#endif

@ -15,14 +15,14 @@ static ssize_t ath12k_write_simulate_radar(struct file *file,
struct ath12k *ar = file->private_data;
int ret;
mutex_lock(&ar->conf_mutex);
wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
ret = ath12k_wmi_simulate_radar(ar);
if (ret)
goto exit;
ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
return ret;
}
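
This debugfs hunk is one instance of the ath12k-wide change called out in the pull request: sections previously serialized on the per-radio ar->conf_mutex now run under the wiphy mutex, and callees such as ath12k_core_halt() assert it with lockdep_assert_wiphy(). A minimal sketch of the pattern, using the driver's ath12k_ar_to_hw() helper:

	/* Paths outside mac80211 callbacks (debugfs, recovery work) take the
	 * wiphy mutex explicitly; mac80211 holds it around most driver
	 * callbacks already, so callees can simply do:
	 *
	 *	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
	 */
	wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
	ret = ath12k_wmi_simulate_radar(ar);
	wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);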

File diff suppressed because it is too large.

@ -123,12 +123,21 @@ struct ath12k_htt_extd_stats_msg {
/* htt_dbg_ext_stats_type */
enum ath12k_dbg_htt_ext_stats_type {
ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45,
ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
/* keep this last */
ATH12K_DBG_HTT_NUM_EXT_STATS,
@ -139,6 +148,7 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1,
HTT_STATS_TX_PDEV_SIFS_TAG = 2,
HTT_STATS_TX_PDEV_FLUSH_TAG = 3,
HTT_STATS_STRING_TAG = 5,
HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11,
HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12,
HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
@ -151,22 +161,47 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG = 21,
HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG = 22,
HTT_STATS_TX_DE_CMN_TAG = 23,
HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25,
HTT_STATS_SFM_CMN_TAG = 26,
HTT_STATS_SRING_STATS_TAG = 27,
HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
HTT_STATS_TX_SCHED_CMN_TAG = 37,
HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
HTT_STATS_SFM_CLIENT_USER_TAG = 41,
HTT_STATS_SFM_CLIENT_TAG = 42,
HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43,
HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG = 46,
HTT_STATS_TX_SELFGEN_CMN_STATS_TAG = 47,
HTT_STATS_TX_SELFGEN_AC_STATS_TAG = 48,
HTT_STATS_TX_SELFGEN_AX_STATS_TAG = 49,
HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG = 50,
HTT_STATS_HW_INTR_MISC_TAG = 54,
HTT_STATS_HW_PDEV_ERRS_TAG = 56,
HTT_STATS_TX_DE_COMPL_STATS_TAG = 65,
HTT_STATS_WHAL_TX_TAG = 66,
HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
HTT_STATS_PDEV_CCA_1SEC_HIST_TAG = 70,
HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG = 71,
HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72,
HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73,
HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74,
HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
HTT_STATS_PDEV_OBSS_PD_TAG = 88,
HTT_STATS_HW_WAR_TAG = 89,
HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100,
HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111,
HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112,
HTT_STATS_MU_PPDU_DIST_TAG = 129,
HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG = 130,
HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG = 135,
HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG = 137,
HTT_STATS_TX_SELFGEN_BE_STATS_TAG = 138,
HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG = 139,
HTT_STATS_DMAC_RESET_STATS_TAG = 155,
HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG = 165,
HTT_STATS_MAX_TAG,
};
@ -690,4 +725,401 @@ struct ath12k_htt_tx_de_compl_stats_tlv {
__le32 tqm_bypass_frame;
} __packed;
enum ath12k_htt_tx_mumimo_grp_invalid_reason_code_stats {
ATH12K_HTT_TX_MUMIMO_GRP_VALID,
ATH12K_HTT_TX_MUMIMO_GRP_INVALID_NUM_MU_USERS_EXCEEDED_MU_MAX_USERS,
ATH12K_HTT_TX_MUMIMO_GRP_INVALID_SCHED_ALGO_NOT_MU_COMPATIBLE_GID,
ATH12K_HTT_TX_MUMIMO_GRP_INVALID_NON_PRIMARY_GRP,
ATH12K_HTT_TX_MUMIMO_GRP_INVALID_ZERO_CANDIDATES,
ATH12K_HTT_TX_MUMIMO_GRP_INVALID_MORE_CANDIDATES,
ATH12K_HTT_TX_MUMIMO_GRP_INVALID_GROUP_SIZE_EXCEED_NSS,
ATH12K_HTT_TX_MUMIMO_GRP_INVALID_GROUP_INELIGIBLE,
ATH12K_HTT_TX_MUMIMO_GRP_INVALID,
ATH12K_HTT_TX_MUMIMO_GRP_INVALID_GROUP_EFF_MU_TPUT_OMBPS,
ATH12K_HTT_TX_MUMIMO_GRP_INVALID_MAX_REASON_CODE,
};
#define ATH12K_HTT_NUM_AC_WMM 0x4
#define ATH12K_HTT_MAX_NUM_SBT_INTR 4
#define ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS 4
#define ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS 8
#define ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS 8
#define ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS 7
#define ATH12K_HTT_TX_NUM_OFDMA_USER_STATS 74
#define ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS 8
#define ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ 8
#define ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS 10
#define ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE \
ATH12K_HTT_TX_MUMIMO_GRP_INVALID_MAX_REASON_CODE
#define ATH12K_HTT_TX_NUM_MUMIMO_GRP_INVALID_WORDS \
(ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ * ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE)
struct ath12k_htt_tx_selfgen_cmn_stats_tlv {
__le32 mac_id__word;
__le32 su_bar;
__le32 rts;
__le32 cts2self;
__le32 qos_null;
__le32 delayed_bar_1;
__le32 delayed_bar_2;
__le32 delayed_bar_3;
__le32 delayed_bar_4;
__le32 delayed_bar_5;
__le32 delayed_bar_6;
__le32 delayed_bar_7;
} __packed;
struct ath12k_htt_tx_selfgen_ac_stats_tlv {
__le32 ac_su_ndpa;
__le32 ac_su_ndp;
__le32 ac_mu_mimo_ndpa;
__le32 ac_mu_mimo_ndp;
__le32 ac_mu_mimo_brpoll[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1];
} __packed;
struct ath12k_htt_tx_selfgen_ax_stats_tlv {
__le32 ax_su_ndpa;
__le32 ax_su_ndp;
__le32 ax_mu_mimo_ndpa;
__le32 ax_mu_mimo_ndp;
__le32 ax_mu_mimo_brpoll[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1];
__le32 ax_basic_trigger;
__le32 ax_bsr_trigger;
__le32 ax_mu_bar_trigger;
__le32 ax_mu_rts_trigger;
__le32 ax_ulmumimo_trigger;
} __packed;
struct ath12k_htt_tx_selfgen_be_stats_tlv {
__le32 be_su_ndpa;
__le32 be_su_ndp;
__le32 be_mu_mimo_ndpa;
__le32 be_mu_mimo_ndp;
__le32 be_mu_mimo_brpoll[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1];
__le32 be_basic_trigger;
__le32 be_bsr_trigger;
__le32 be_mu_bar_trigger;
__le32 be_mu_rts_trigger;
__le32 be_ulmumimo_trigger;
__le32 be_su_ndpa_queued;
__le32 be_su_ndp_queued;
__le32 be_mu_mimo_ndpa_queued;
__le32 be_mu_mimo_ndp_queued;
__le32 be_mu_mimo_brpoll_queued[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1];
__le32 be_ul_mumimo_trigger[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
} __packed;
struct ath12k_htt_tx_selfgen_ac_err_stats_tlv {
__le32 ac_su_ndp_err;
__le32 ac_su_ndpa_err;
__le32 ac_mu_mimo_ndpa_err;
__le32 ac_mu_mimo_ndp_err;
__le32 ac_mu_mimo_brp1_err;
__le32 ac_mu_mimo_brp2_err;
__le32 ac_mu_mimo_brp3_err;
} __packed;
struct ath12k_htt_tx_selfgen_ax_err_stats_tlv {
__le32 ax_su_ndp_err;
__le32 ax_su_ndpa_err;
__le32 ax_mu_mimo_ndpa_err;
__le32 ax_mu_mimo_ndp_err;
__le32 ax_mu_mimo_brp_err[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1];
__le32 ax_basic_trigger_err;
__le32 ax_bsr_trigger_err;
__le32 ax_mu_bar_trigger_err;
__le32 ax_mu_rts_trigger_err;
__le32 ax_ulmumimo_trigger_err;
} __packed;
struct ath12k_htt_tx_selfgen_be_err_stats_tlv {
__le32 be_su_ndp_err;
__le32 be_su_ndpa_err;
__le32 be_mu_mimo_ndpa_err;
__le32 be_mu_mimo_ndp_err;
__le32 be_mu_mimo_brp_err[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1];
__le32 be_basic_trigger_err;
__le32 be_bsr_trigger_err;
__le32 be_mu_bar_trigger_err;
__le32 be_mu_rts_trigger_err;
__le32 be_ulmumimo_trigger_err;
__le32 be_mu_mimo_brp_err_num_cbf_rxd[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
__le32 be_su_ndpa_flushed;
__le32 be_su_ndp_flushed;
__le32 be_mu_mimo_ndpa_flushed;
__le32 be_mu_mimo_ndp_flushed;
__le32 be_mu_mimo_brpoll_flushed[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1];
__le32 be_ul_mumimo_trigger_err[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
} __packed;
enum ath12k_htt_tx_selfgen_sch_tsflag_error_stats {
ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_FLUSH_RCVD_ERR,
ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_FILT_SCHED_CMD_ERR,
ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_MISMATCH_ERR,
ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_CBF_MIMO_CTRL_MISMATCH_ERR,
ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_CBF_BW_MISMATCH_ERR,
ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RETRY_COUNT_FAIL_ERR,
ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_TOO_LATE_RECEIVED_ERR,
ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_SIFS_STALL_NO_NEXT_CMD_ERR,
ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS
};
struct ath12k_htt_tx_selfgen_ac_sched_status_stats_tlv {
__le32 ac_su_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ac_su_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ac_su_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
__le32 ac_mu_mimo_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ac_mu_mimo_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ac_mu_mimo_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
__le32 ac_mu_mimo_brp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ac_mu_mimo_brp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
} __packed;
struct ath12k_htt_tx_selfgen_ax_sched_status_stats_tlv {
__le32 ax_su_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ax_su_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ax_su_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
__le32 ax_mu_mimo_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ax_mu_mimo_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ax_mu_mimo_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
__le32 ax_mu_brp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ax_mu_brp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
__le32 ax_mu_bar_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ax_mu_bar_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
__le32 ax_basic_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ax_basic_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
__le32 ax_ulmumimo_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 ax_ulmumimo_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
} __packed;
struct ath12k_htt_tx_selfgen_be_sched_status_stats_tlv {
__le32 be_su_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 be_su_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 be_su_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
__le32 be_mu_mimo_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 be_mu_mimo_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 be_mu_mimo_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
__le32 be_mu_brp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 be_mu_brp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
__le32 be_mu_bar_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 be_mu_bar_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
__le32 be_basic_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 be_basic_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
__le32 be_ulmumimo_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS];
__le32 be_ulmumimo_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS];
} __packed;
struct ath12k_htt_stats_string_tlv {
DECLARE_FLEX_ARRAY(__le32, data);
} __packed;
#define ATH12K_HTT_SRING_STATS_MAC_ID GENMASK(7, 0)
#define ATH12K_HTT_SRING_STATS_RING_ID GENMASK(15, 8)
#define ATH12K_HTT_SRING_STATS_ARENA GENMASK(23, 16)
#define ATH12K_HTT_SRING_STATS_EP BIT(24)
#define ATH12K_HTT_SRING_STATS_NUM_AVAIL_WORDS GENMASK(15, 0)
#define ATH12K_HTT_SRING_STATS_NUM_VALID_WORDS GENMASK(31, 16)
#define ATH12K_HTT_SRING_STATS_HEAD_PTR GENMASK(15, 0)
#define ATH12K_HTT_SRING_STATS_TAIL_PTR GENMASK(31, 16)
#define ATH12K_HTT_SRING_STATS_CONSUMER_EMPTY GENMASK(15, 0)
#define ATH12K_HTT_SRING_STATS_PRODUCER_FULL GENMASK(31, 16)
#define ATH12K_HTT_SRING_STATS_PREFETCH_COUNT GENMASK(15, 0)
#define ATH12K_HTT_SRING_STATS_INTERNAL_TAIL_PTR GENMASK(31, 16)
struct ath12k_htt_sring_stats_tlv {
__le32 mac_id__ring_id__arena__ep;
__le32 base_addr_lsb;
__le32 base_addr_msb;
__le32 ring_size;
__le32 elem_size;
__le32 num_avail_words__num_valid_words;
__le32 head_ptr__tail_ptr;
__le32 consumer_empty__producer_full;
__le32 prefetch_count__internal_tail_ptr;
} __packed;
struct ath12k_htt_sfm_cmn_tlv {
__le32 mac_id__word;
__le32 buf_total;
__le32 mem_empty;
__le32 deallocate_bufs;
__le32 num_records;
} __packed;
struct ath12k_htt_sfm_client_tlv {
__le32 client_id;
__le32 buf_min;
__le32 buf_max;
__le32 buf_busy;
__le32 buf_alloc;
__le32 buf_avail;
__le32 num_users;
} __packed;
struct ath12k_htt_sfm_client_user_tlv {
DECLARE_FLEX_ARRAY(__le32, dwords_used_by_user_n);
} __packed;
struct ath12k_htt_tx_pdev_mu_mimo_sch_stats_tlv {
__le32 mu_mimo_sch_posted;
__le32 mu_mimo_sch_failed;
__le32 mu_mimo_ppdu_posted;
__le32 ac_mu_mimo_sch_nusers[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS];
__le32 ax_mu_mimo_sch_nusers[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS];
__le32 ax_ofdma_sch_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
__le32 ax_ul_ofdma_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
__le32 ax_ul_ofdma_bsr_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
__le32 ax_ul_ofdma_bar_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
__le32 ax_ul_ofdma_brp_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS];
__le32 ax_ul_mumimo_nusers[ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS];
__le32 ax_ul_mumimo_brp_nusers[ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS];
__le32 ac_mu_mimo_per_grp_sz[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS];
__le32 ax_mu_mimo_per_grp_sz[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS];
__le32 be_mu_mimo_sch_nusers[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
__le32 be_mu_mimo_per_grp_sz[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
__le32 ac_mu_mimo_grp_sz_ext[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS];
} __packed;
struct ath12k_htt_tx_pdev_mumimo_grp_stats_tlv {
__le32 dl_mumimo_grp_best_grp_size[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ];
__le32 dl_mumimo_grp_best_num_usrs[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS];
__le32 dl_mumimo_grp_eligible[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ];
__le32 dl_mumimo_grp_ineligible[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ];
__le32 dl_mumimo_grp_invalid[ATH12K_HTT_TX_NUM_MUMIMO_GRP_INVALID_WORDS];
__le32 dl_mumimo_grp_tputs[ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS];
__le32 ul_mumimo_grp_best_grp_size[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ];
__le32 ul_mumimo_grp_best_usrs[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS];
__le32 ul_mumimo_grp_tputs[ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS];
} __packed;
enum ath12k_htt_stats_tx_sched_modes {
ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC = 0,
ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX,
ATH12K_HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX,
ATH12K_HTT_STATS_TX_SCHED_MODE_MU_OFDMA_BE,
ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_BE
};
struct ath12k_htt_tx_pdev_mpdu_stats_tlv {
__le32 mpdus_queued_usr;
__le32 mpdus_tried_usr;
__le32 mpdus_failed_usr;
__le32 mpdus_requeued_usr;
__le32 err_no_ba_usr;
__le32 mpdu_underrun_usr;
__le32 ampdu_underrun_usr;
__le32 user_index;
__le32 tx_sched_mode;
} __packed;
struct ath12k_htt_pdev_stats_cca_counters_tlv {
__le32 tx_frame_usec;
__le32 rx_frame_usec;
__le32 rx_clear_usec;
__le32 my_rx_frame_usec;
__le32 usec_cnt;
__le32 med_rx_idle_usec;
__le32 med_tx_idle_global_usec;
__le32 cca_obss_usec;
} __packed;
struct ath12k_htt_pdev_cca_stats_hist_v1_tlv {
__le32 chan_num;
__le32 num_records;
__le32 valid_cca_counters_bitmap;
__le32 collection_interval;
} __packed;
struct ath12k_htt_pdev_obss_pd_stats_tlv {
__le32 num_obss_tx_ppdu_success;
__le32 num_obss_tx_ppdu_failure;
__le32 num_sr_tx_transmissions;
__le32 num_spatial_reuse_opportunities;
__le32 num_non_srg_opportunities;
__le32 num_non_srg_ppdu_tried;
__le32 num_non_srg_ppdu_success;
__le32 num_srg_opportunities;
__le32 num_srg_ppdu_tried;
__le32 num_srg_ppdu_success;
__le32 num_psr_opportunities;
__le32 num_psr_ppdu_tried;
__le32 num_psr_ppdu_success;
__le32 num_non_srg_tried_per_ac[ATH12K_HTT_NUM_AC_WMM];
__le32 num_non_srg_success_ac[ATH12K_HTT_NUM_AC_WMM];
__le32 num_srg_tried_per_ac[ATH12K_HTT_NUM_AC_WMM];
__le32 num_srg_success_per_ac[ATH12K_HTT_NUM_AC_WMM];
__le32 num_obss_min_dur_check_flush_cnt;
__le32 num_sr_ppdu_abort_flush_cnt;
} __packed;
struct ath12k_htt_dmac_reset_stats_tlv {
__le32 reset_count;
__le32 reset_time_lo_ms;
__le32 reset_time_hi_ms;
__le32 disengage_time_lo_ms;
__le32 disengage_time_hi_ms;
__le32 engage_time_lo_ms;
__le32 engage_time_hi_ms;
__le32 disengage_count;
__le32 engage_count;
__le32 drain_dest_ring_mask;
} __packed;
struct ath12k_htt_pdev_sched_algo_ofdma_stats_tlv {
__le32 mac_id__word;
__le32 rate_based_dlofdma_enabled_cnt[ATH12K_HTT_NUM_AC_WMM];
__le32 rate_based_dlofdma_disabled_cnt[ATH12K_HTT_NUM_AC_WMM];
__le32 rate_based_dlofdma_probing_cnt[ATH12K_HTT_NUM_AC_WMM];
__le32 rate_based_dlofdma_monitor_cnt[ATH12K_HTT_NUM_AC_WMM];
__le32 chan_acc_lat_based_dlofdma_enabled_cnt[ATH12K_HTT_NUM_AC_WMM];
__le32 chan_acc_lat_based_dlofdma_disabled_cnt[ATH12K_HTT_NUM_AC_WMM];
__le32 chan_acc_lat_based_dlofdma_monitor_cnt[ATH12K_HTT_NUM_AC_WMM];
__le32 downgrade_to_dl_su_ru_alloc_fail[ATH12K_HTT_NUM_AC_WMM];
__le32 candidate_list_single_user_disable_ofdma[ATH12K_HTT_NUM_AC_WMM];
__le32 dl_cand_list_dropped_high_ul_qos_weight[ATH12K_HTT_NUM_AC_WMM];
__le32 ax_dlofdma_disabled_due_to_pipelining[ATH12K_HTT_NUM_AC_WMM];
__le32 dlofdma_disabled_su_only_eligible[ATH12K_HTT_NUM_AC_WMM];
__le32 dlofdma_disabled_consec_no_mpdus_tried[ATH12K_HTT_NUM_AC_WMM];
__le32 dlofdma_disabled_consec_no_mpdus_success[ATH12K_HTT_NUM_AC_WMM];
} __packed;
enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE {
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52_26,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106_26,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_242,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484_242,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484_242,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2_484,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3_484,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x4,
ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS,
};
#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
#define ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS 16
#define ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS 5
#define ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS 4
#define ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS 4
struct ath12k_htt_tx_pdev_rate_stats_be_ofdma_tlv {
__le32 mac_id__word;
__le32 be_ofdma_tx_ldpc;
__le32 be_ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
__le32 be_ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
__le32 be_ofdma_tx_bw[ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS];
__le32 gi[ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS][ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
__le32 be_ofdma_tx_ru_size[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
__le32 be_ofdma_eht_sig_mcs[ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS];
} __packed;
#endif

@ -327,20 +327,22 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
}
static
u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif)
u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
struct ath12k_link_vif *arvif)
{
u32 bank_config = 0;
struct ath12k_vif *ahvif = arvif->ahvif;
/* Only valid for raw frames with HW crypto enabled.
* With SW crypto, mac80211 sets key per packet
*/
if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
bank_config |=
u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher),
u32_encode_bits(ath12k_dp_tx_get_encrypt_type(ahvif->key_cipher),
HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
bank_config |= u32_encode_bits(arvif->tx_encap_type,
bank_config |= u32_encode_bits(ahvif->tx_encap_type,
HAL_TX_BANK_CONFIG_ENCAP_TYPE);
bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
@ -355,7 +357,7 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif
HAL_TX_ADDRY_EN),
HAL_TX_BANK_CONFIG_ADDRY_EN);
bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 3 : 0,
bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(ahvif->vif) ? 3 : 0,
HAL_TX_BANK_CONFIG_MESH_EN) |
u32_encode_bits(arvif->vdev_id_check_en,
HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);
@ -365,7 +367,8 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif
return bank_config;
}
static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif,
static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab,
struct ath12k_link_vif *arvif,
struct ath12k_dp *dp)
{
int bank_id = DP_INVALID_BANK_ID;
@ -1099,9 +1102,9 @@ int ath12k_dp_htt_connect(struct ath12k_dp *dp)
return 0;
}
static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
{
switch (arvif->vdev_type) {
switch (arvif->ahvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
/* TODO: Verify the search type and flags since ast hash
* is not part of peer mapv3
@ -1120,7 +1123,7 @@ static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
}
}
void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif)
{
struct ath12k_base *ab = ar->ab;
@ -1162,7 +1165,7 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
spin_lock_bh(&dp->rx_desc_lock);
for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
desc_info = dp->spt_info->rxbaddr[i];
desc_info = dp->rxbaddr[i];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
if (!desc_info[j].in_use) {
@ -1181,11 +1184,11 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
}
for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
if (!dp->spt_info->rxbaddr[i])
if (!dp->rxbaddr[i])
continue;
kfree(dp->spt_info->rxbaddr[i]);
dp->spt_info->rxbaddr[i] = NULL;
kfree(dp->rxbaddr[i]);
dp->rxbaddr[i] = NULL;
}
spin_unlock_bh(&dp->rx_desc_lock);
@ -1202,10 +1205,16 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
if (!skb)
continue;
skb_cb = ATH12K_SKB_CB(skb);
ar = skb_cb->ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
/* if we are unregistering, hw would've been destroyed and
* ar is no longer valid.
*/
if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
skb_cb = ATH12K_SKB_CB(skb);
ar = skb_cb->ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
}
dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
skb->len, DMA_TO_DEVICE);
@ -1220,11 +1229,11 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
if (!dp->spt_info->txbaddr[tx_spt_page])
if (!dp->txbaddr[tx_spt_page])
continue;
kfree(dp->spt_info->txbaddr[tx_spt_page]);
dp->spt_info->txbaddr[tx_spt_page] = NULL;
kfree(dp->txbaddr[tx_spt_page]);
dp->txbaddr[tx_spt_page] = NULL;
}
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
@ -1241,6 +1250,7 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
}
kfree(dp->spt_info);
dp->spt_info = NULL;
}
static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
@ -1276,8 +1286,10 @@ void ath12k_dp_free(struct ath12k_base *ab)
ath12k_dp_rx_reo_cmd_list_cleanup(ab);
for (i = 0; i < ab->hw_params->max_tx_ring; i++)
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
kfree(dp->tx_ring[i].tx_status);
dp->tx_ring[i].tx_status = NULL;
}
ath12k_dp_rx_free(ab);
/* Deinit any SOC level resource */
@ -1415,7 +1427,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
dp->spt_info->rxbaddr[i] = &rx_descs[0];
dp->rxbaddr[i] = &rx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
@ -1445,7 +1457,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;
dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];
dp->txbaddr[tx_spt_page] = &tx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);

@ -16,6 +16,7 @@ struct ath12k_base;
struct ath12k_peer;
struct ath12k_dp;
struct ath12k_vif;
struct ath12k_link_vif;
struct hal_tcl_status_ring;
struct ath12k_ext_irq_grp;
@ -300,8 +301,6 @@ struct ath12k_tx_desc_info {
struct ath12k_spt_info {
dma_addr_t paddr;
u64 *vaddr;
struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES];
struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES];
};
struct ath12k_reo_queue_ref {
@ -352,6 +351,8 @@ struct ath12k_dp {
struct ath12k_spt_info *spt_info;
u32 num_spt_pages;
u32 rx_ppt_base;
struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES];
struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES];
struct list_head rx_desc_free_list;
/* protects the free desc list */
spinlock_t rx_desc_lock;
@ -1799,7 +1800,7 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
struct ath12k_ext_irq_grp *irq_grp,
int budget);
int ath12k_dp_htt_connect(struct ath12k_dp *dp);
void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif);
void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif);
void ath12k_dp_free(struct ath12k_base *ab);
int ath12k_dp_alloc(struct ath12k_base *ab);
void ath12k_dp_cc_config(struct ath12k_base *ab);

@ -26,15 +26,12 @@ ath12k_dp_mon_rx_populate_byte_count(const struct hal_rx_ppdu_end_user_stats *st
void *ppduinfo,
struct hal_rx_user_status *rx_user_status)
{
u32 mpdu_ok_byte_count = __le32_to_cpu(stats->mpdu_ok_cnt);
u32 mpdu_err_byte_count = __le32_to_cpu(stats->mpdu_err_cnt);
rx_user_status->mpdu_ok_byte_count =
u32_get_bits(mpdu_ok_byte_count,
HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT);
le32_get_bits(stats->info7,
HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT);
rx_user_status->mpdu_err_byte_count =
u32_get_bits(mpdu_err_byte_count,
HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT);
le32_get_bits(stats->info8,
HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT);
}
static void
@ -593,12 +590,20 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
struct hal_rx_ppdu_start *ppdu_start =
(struct hal_rx_ppdu_start *)tlv_data;
u64 ppdu_ts = ath12k_le32hilo_to_u64(ppdu_start->ppdu_start_ts_63_32,
ppdu_start->ppdu_start_ts_31_0);
info[0] = __le32_to_cpu(ppdu_start->info0);
ppdu_info->ppdu_id =
u32_get_bits(info[0], HAL_RX_PPDU_START_INFO0_PPDU_ID);
ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
ppdu_info->ppdu_id = u32_get_bits(info[0],
HAL_RX_PPDU_START_INFO0_PPDU_ID);
info[1] = __le32_to_cpu(ppdu_start->info1);
ppdu_info->chan_num = u32_get_bits(info[1],
HAL_RX_PPDU_START_INFO1_CHAN_NUM);
ppdu_info->freq = u32_get_bits(info[1],
HAL_RX_PPDU_START_INFO1_CHAN_FREQ);
ppdu_info->ppdu_ts = ppdu_ts;
if (ppdu_info->ppdu_id != ppdu_info->last_ppdu_id) {
ppdu_info->last_ppdu_id = ppdu_info->ppdu_id;
@ -726,33 +731,20 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
case HAL_PHYRX_RSSI_LEGACY: {
struct hal_rx_phyrx_rssi_legacy_info *rssi =
(struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
u32 reception_type = 0;
u32 rssi_legacy_info = __le32_to_cpu(rssi->rsvd[0]);
info[0] = __le32_to_cpu(rssi->info0);
info[1] = __le32_to_cpu(rssi->info1);
/* TODO: Please note that the combined rssi will not be accurate
* in MU case. Rssi in MU needs to be retrieved from
* PHYRX_OTHER_RECEIVE_INFO TLV.
*/
ppdu_info->rssi_comb =
u32_get_bits(info[0],
HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB);
reception_type =
u32_get_bits(rssi_legacy_info,
HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION);
u32_get_bits(info[1],
HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB);
switch (reception_type) {
case HAL_RECEPTION_TYPE_ULOFMDA:
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
break;
case HAL_RECEPTION_TYPE_ULMIMO:
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
break;
default:
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
ppdu_info->bw = u32_get_bits(info[0],
HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW);
break;
}
case HAL_RXPCU_PPDU_END_INFO: {
@ -860,27 +852,29 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff *msdu)
static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar,
struct sk_buff *head_msdu,
struct sk_buff *tail_msdu)
{
u32 rx_pkt_offset, l2_hdr_offset;
rx_pkt_offset = ar->ab->hal.hal_desc_sz;
l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab,
(struct hal_rx_desc *)msdu->data);
skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
l2_hdr_offset =
ath12k_dp_rx_h_l3pad(ar->ab, (struct hal_rx_desc *)tail_msdu->data);
skb_pull(head_msdu, rx_pkt_offset + l2_hdr_offset);
}
static struct sk_buff *
ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
u32 mac_id, struct sk_buff *head_msdu,
ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id,
struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
struct ieee80211_rx_status *rxs, bool *fcs_err)
{
struct ath12k_base *ab = ar->ab;
struct sk_buff *msdu, *mpdu_buf, *prev_buf;
struct hal_rx_desc *rx_desc;
struct sk_buff *msdu, *mpdu_buf, *prev_buf, *head_frag_list;
struct hal_rx_desc *rx_desc, *tail_rx_desc;
u8 *hdr_desc, *dest, decap_format;
struct ieee80211_hdr_3addr *wh;
u32 err_bitmap;
u32 err_bitmap, frag_list_sum_len = 0;
mpdu_buf = NULL;
@ -888,24 +882,30 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
goto err_merge_fail;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
tail_rx_desc = (struct hal_rx_desc *)tail_msdu->data;
err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, tail_rx_desc);
if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
*fcs_err = true;
decap_format = ath12k_dp_rx_h_decap_type(ab, rx_desc);
decap_format = ath12k_dp_rx_h_decap_type(ab, tail_rx_desc);
ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
ath12k_dp_rx_h_ppdu(ar, tail_rx_desc, rxs);
if (decap_format == DP_RX_DECAP_TYPE_RAW) {
ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu);
ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
prev_buf = head_msdu;
msdu = head_msdu->next;
head_frag_list = NULL;
while (msdu) {
ath12k_dp_mon_rx_msdus_set_payload(ar, msdu);
ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu);
if (!head_frag_list)
head_frag_list = msdu;
frag_list_sum_len += msdu->len;
prev_buf = msdu;
msdu = msdu->next;
}
@ -913,6 +913,12 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
prev_buf->next = NULL;
skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
if (head_frag_list) {
skb_shinfo(head_msdu)->frag_list = head_frag_list;
head_msdu->data_len = frag_list_sum_len;
head_msdu->len += head_msdu->data_len;
head_msdu->next = NULL;
}
} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
u8 qos_pkt = 0;
@ -929,7 +935,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
msdu = head_msdu;
while (msdu) {
ath12k_dp_mon_rx_msdus_set_payload(ar, msdu);
ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
@ -1135,7 +1141,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
}
static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
struct sk_buff *head_msdu,
struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct napi_struct *napi)
{
@ -1144,7 +1150,8 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
struct ieee80211_rx_status *rxs = &dp->rx_status;
bool fcs_err = false;
mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, head_msdu,
mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id,
head_msdu, tail_msdu,
rxs, &fcs_err);
if (!mon_skb)
goto mon_deliver_fail;
@ -1252,7 +1259,7 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
if (head_msdu && tail_msdu) {
ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
ppdu_info, napi);
tail_msdu, ppdu_info, napi);
}
kfree(mon_mpdu);
@ -1948,15 +1955,16 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
struct dp_mon_mpdu *tmp, *mon_mpdu;
struct sk_buff *head_msdu;
struct sk_buff *head_msdu, *tail_msdu;
list_for_each_entry_safe(mon_mpdu, tmp,
&tx_ppdu_info->dp_tx_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
head_msdu = mon_mpdu->head;
tail_msdu = mon_mpdu->tail;
if (head_msdu)
ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, tail_msdu,
&tx_ppdu_info->rx_status, napi);
kfree(mon_mpdu);
@ -2165,7 +2173,7 @@ ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_st
}
static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
struct ath12k_sta *arsta,
struct ath12k_link_sta *arsta,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats;
@ -2321,7 +2329,8 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
struct hal_rx_mon_ppdu_info *ppdu_info,
u32 uid)
{
struct ath12k_sta *arsta = NULL;
struct ath12k_sta *ahsta;
struct ath12k_link_sta *arsta;
struct ath12k_rx_peer_stats *rx_stats = NULL;
struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid];
struct ath12k_peer *peer;
@ -2338,7 +2347,8 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
return;
}
arsta = ath12k_sta_to_arsta(peer->sta);
ahsta = ath12k_sta_to_ahsta(peer->sta);
arsta = &ahsta->deflink;
rx_stats = arsta->rx_stats;
if (!rx_stats)
@ -2445,7 +2455,8 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
struct dp_srng *mon_dst_ring;
struct hal_srng *srng;
struct dp_rxdma_mon_ring *buf_ring;
struct ath12k_sta *arsta = NULL;
struct ath12k_sta *ahsta = NULL;
struct ath12k_link_sta *arsta;
struct ath12k_peer *peer;
u64 cookie;
int num_buffs_reaped = 0, srng_id, buf_id;
@ -2514,7 +2525,8 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
}
if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
arsta = ath12k_sta_to_arsta(peer->sta);
ahsta = ath12k_sta_to_ahsta(peer->sta);
arsta = &ahsta->deflink;
ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
ppdu_info);
} else if ((ppdu_info->fc_valid) &&


@ -1041,13 +1041,14 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
struct ieee80211_ampdu_params *params)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta);
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
struct ath12k_link_sta *arsta = &ahsta->deflink;
int vdev_id = arsta->arvif->vdev_id;
int ret;
ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
params->tid, params->buf_size,
params->ssn, arsta->pn_type);
params->ssn, arsta->ahsta->pn_type);
if (ret)
ath12k_warn(ab, "failed to setup rx tid %d\n", ret);
@ -1059,7 +1060,8 @@ int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta);
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
struct ath12k_link_sta *arsta = &ahsta->deflink;
int vdev_id = arsta->arvif->vdev_id;
bool active;
int ret;
@ -1091,7 +1093,7 @@ int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
return ret;
}
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key)
@ -1313,7 +1315,8 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
struct ieee80211_sta *sta;
struct ath12k_sta *arsta;
struct ath12k_sta *ahsta;
struct ath12k_link_sta *arsta;
struct htt_ppdu_stats_user_rate *user_rate;
struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
@ -1394,7 +1397,8 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
}
sta = peer->sta;
arsta = ath12k_sta_to_arsta(sta);
ahsta = ath12k_sta_to_ahsta(sta);
arsta = &ahsta->deflink;
memset(&arsta->txrate, 0, sizeof(arsta->txrate));


@ -88,7 +88,7 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
struct ieee80211_ampdu_params *params);
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
struct ieee80211_ampdu_params *params);
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key);


@ -10,7 +10,7 @@
#include "hw.h"
static enum hal_tcl_encap_type
ath12k_dp_tx_get_encap_type(struct ath12k_vif *arvif, struct sk_buff *skb)
ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath12k_base *ab = arvif->ar->ab;
@ -216,7 +216,7 @@ static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
return ret;
}
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct sk_buff *skb)
{
struct ath12k_base *ab = ar->ab;
@ -230,6 +230,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
struct sk_buff *skb_ext_desc;
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ath12k_vif *ahvif = arvif->ahvif;
struct dp_tx_ring *tx_ring;
u8 pool_id;
u8 hal_ring_id;
@ -274,7 +275,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
ti.bank_id = arvif->bank_id;
ti.meta_data_flags = arvif->tcl_metadata;
if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
ti.encrypt_type =
@ -376,7 +377,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
ti.desc_id = tx_desc->desc_id;
ti.data_len = skb->len;
skb_cb->paddr = ti.paddr;
skb_cb->vif = arvif->vif;
skb_cb->vif = ahvif->vif;
skb_cb->ar = ar;
if (msdu_ext_desc) {


@ -16,7 +16,7 @@ struct ath12k_dp_htt_wbm_tx_status {
};
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct sk_buff *skb);
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);


@ -385,13 +385,13 @@ static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP);
RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
}
static u8 ath12k_hw_qcn9274_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
{
return le16_get_bits(desc->u.qcn9274.msdu_end.info5,
RX_MSDU_END_QCN9274_INFO5_TID);
RX_MSDU_END_INFO5_TID);
}
static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
@ -846,13 +846,13 @@ static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type(struct hal_rx_desc
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP);
RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
}
static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
{
return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
RX_MSDU_END_QCN9274_INFO5_TID);
RX_MSDU_END_INFO5_TID);
}
static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
@ -1198,7 +1198,7 @@ static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
{
return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
RX_MSDU_END_WCN7850_INFO12_MIMO_SS_BITMAP);
RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
}
static u8 ath12k_hw_wcn7850_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
@ -1216,7 +1216,7 @@ static void ath12k_hw_wcn7850_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
memcpy(&fdesc->u.wcn7850.msdu_end, &ldesc->u.wcn7850.msdu_end,
sizeof(struct rx_msdu_end_wcn7850));
sizeof(struct rx_msdu_end_qcn9274));
}
static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_RX_H
@ -156,6 +156,7 @@ struct hal_rx_mon_ppdu_info {
u32 preamble_type;
u32 mpdu_len;
u16 chan_num;
u16 freq;
u16 tcp_msdu_count;
u16 tcp_ack_msdu_count;
u16 udp_msdu_count;
@ -232,21 +233,25 @@ struct hal_rx_mon_ppdu_info {
u8 medium_prot_type;
};
#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
#define HAL_RX_PPDU_START_INFO1_CHAN_NUM GENMASK(15, 0)
#define HAL_RX_PPDU_START_INFO1_CHAN_FREQ GENMASK(31, 16)
struct hal_rx_ppdu_start {
__le32 info0;
__le32 chan_num;
__le32 ppdu_start_ts;
__le32 info1;
__le32 ppdu_start_ts_31_0;
__le32 ppdu_start_ts_63_32;
__le32 rsvd[2];
} __packed;
#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16)
#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(26, 16)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(10, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(11)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(12)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(13)
#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(24, 21)
#define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16)
@ -262,8 +267,8 @@ struct hal_rx_ppdu_start {
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT GENMASK(24, 0)
#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT GENMASK(24, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT GENMASK(24, 0)
#define HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0)
struct hal_rx_ppdu_end_user_stats {
__le32 rsvd0[2];
@ -278,9 +283,9 @@ struct hal_rx_ppdu_end_user_stats {
__le32 usr_resp_ref;
__le32 info6;
__le32 rsvd3[4];
__le32 mpdu_ok_cnt;
__le32 info7;
__le32 rsvd4;
__le32 mpdu_err_cnt;
__le32 info8;
__le32 rsvd5[2];
__le32 usr_resp_ref_ext;
__le32 rsvd6;
@ -436,23 +441,27 @@ enum hal_rx_ul_reception_type {
HAL_RECEPTION_TYPE_FRAMELESS
};
#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB GENMASK(15, 8)
#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION GENMASK(3, 0)
#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RECEPTION GENMASK(3, 0)
#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW GENMASK(7, 5)
#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
struct hal_rx_phyrx_rssi_legacy_info {
__le32 rsvd[35];
__le32 info0;
__le32 rsvd0[39];
__le32 info1;
__le32 rsvd1;
} __packed;
#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16)
#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(31, 16)
#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0)
#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16)
#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(31, 16)
#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0)
struct hal_rx_mpdu_start {
__le32 rsvd0[9];
__le32 info0;
__le32 info1;
__le32 rsvd1[11];
__le32 rsvd1[2];
__le32 info2;
__le32 rsvd2[9];
__le32 rsvd2[16];
} __packed;
#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)


@ -31,6 +31,7 @@ struct ath12k_hif_ops {
void (*ce_irq_disable)(struct ath12k_base *ab);
void (*get_ce_msi_idx)(struct ath12k_base *ab, u32 ce_id, u32 *msi_idx);
int (*panic_handler)(struct ath12k_base *ab);
void (*coredump_download)(struct ath12k_base *ab);
};
static inline int ath12k_hif_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
@ -156,4 +157,9 @@ static inline int ath12k_hif_panic_handler(struct ath12k_base *ab)
return ab->hif.ops->panic_handler(ab);
}
static inline void ath12k_hif_coredump_download(struct ath12k_base *ab)
{
if (ab->hif.ops->coredump_download)
ab->hif.ops->coredump_download(ab);
}
#endif /* ATH12K_HIF_H */


@ -913,7 +913,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_cfg = 0,
.rfkill_on_level = 0,
.rddm_size = 0,
.rddm_size = 0x600000,
.def_num_link = 0,
.max_mlo_peer = 256,
@ -1069,7 +1069,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_cfg = 0,
.rfkill_on_level = 0,
.rddm_size = 0,
.rddm_size = 0x600000,
.def_num_link = 0,
.max_mlo_peer = 256,

File diff suppressed because it is too large


@ -41,6 +41,9 @@ struct ath12k_generic_iter {
#define ATH12K_TX_POWER_MAX_VAL 70
#define ATH12K_TX_POWER_MIN_VAL 0
#define ATH12K_DEFAULT_LINK_ID 0
#define ATH12K_INVALID_LINK_ID 255
enum ath12k_supported_bw {
ATH12K_BW_20 = 0,
ATH12K_BW_40 = 1,
@ -65,9 +68,9 @@ u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
void __ath12k_mac_scan_finish(struct ath12k *ar);
void ath12k_mac_scan_finish(struct ath12k *ar);
struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id);
struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
u32 vdev_id);
struct ath12k_link_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id);
struct ath12k_link_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
u32 vdev_id);
struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id);
struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id);
@ -82,7 +85,7 @@ int ath12k_mac_rfkill_config(struct ath12k *ar);
int ath12k_mac_wait_tx_complete(struct ath12k *ar);
void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb);
void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id);
int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif,
int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);
u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar);


@ -649,3 +649,8 @@ void ath12k_mhi_resume(struct ath12k_pci *ab_pci)
{
ath12k_mhi_set_state(ab_pci, ATH12K_MHI_RESUME);
}
void ath12k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic)
{
mhi_download_rddm_image(mhi_ctrl, in_panic);
}


@ -43,5 +43,5 @@ void ath12k_mhi_clear_vector(struct ath12k_base *ab);
void ath12k_mhi_suspend(struct ath12k_pci *ar_pci);
void ath12k_mhi_resume(struct ath12k_pci *ar_pci);
void ath12k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic);
#endif


@ -69,20 +69,20 @@ static size_t ath12k_p2p_noa_ie_len_compute(const struct ath12k_wmi_p2p_noa_info
return len;
}
static void ath12k_p2p_noa_ie_assign(struct ath12k_vif *arvif, void *ie,
static void ath12k_p2p_noa_ie_assign(struct ath12k_link_vif *arvif, void *ie,
size_t len)
{
struct ath12k *ar = arvif->ar;
lockdep_assert_held(&ar->data_lock);
kfree(arvif->u.ap.noa_data);
kfree(arvif->ahvif->u.ap.noa_data);
arvif->u.ap.noa_data = ie;
arvif->u.ap.noa_len = len;
arvif->ahvif->u.ap.noa_data = ie;
arvif->ahvif->u.ap.noa_len = len;
}
static void __ath12k_p2p_noa_update(struct ath12k_vif *arvif,
static void __ath12k_p2p_noa_update(struct ath12k_link_vif *arvif,
const struct ath12k_wmi_p2p_noa_info *noa)
{
struct ath12k *ar = arvif->ar;
@ -105,7 +105,7 @@ static void __ath12k_p2p_noa_update(struct ath12k_vif *arvif,
ath12k_p2p_noa_ie_assign(arvif, ie, len);
}
void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
void ath12k_p2p_noa_update(struct ath12k_link_vif *arvif,
const struct ath12k_wmi_p2p_noa_info *noa)
{
struct ath12k *ar = arvif->ar;
@ -118,9 +118,12 @@ void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_p2p_noa_arg *arg = data;
struct ath12k_link_vif *arvif;
WARN_ON(!rcu_read_lock_any_held());
arvif = &ahvif->deflink;
if (arvif->ar != arg->ar || arvif->vdev_id != arg->vdev_id)
return;


@ -16,7 +16,7 @@ struct ath12k_p2p_noa_arg {
const struct ath12k_wmi_p2p_noa_info *noa;
};
void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
void ath12k_p2p_noa_update(struct ath12k_link_vif *arvif,
const struct ath12k_wmi_p2p_noa_info *noa);
void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
const struct ath12k_wmi_p2p_noa_info *noa);


@ -7,6 +7,8 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include "pci.h"
#include "core.h"
@ -425,9 +427,9 @@ static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
}
}
static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
static void ath12k_pci_ce_workqueue(struct work_struct *work)
{
struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);
int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
@ -449,7 +451,7 @@ static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
disable_irq_nosync(ab->irq_num[irq_idx]);
tasklet_schedule(&ce_pipe->intr_tq);
queue_work(system_bh_wq, &ce_pipe->intr_wq);
return IRQ_HANDLED;
}
@ -675,7 +677,7 @@ static int ath12k_pci_config_irq(struct ath12k_base *ab)
irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet);
INIT_WORK(&ce_pipe->intr_wq, ath12k_pci_ce_workqueue);
ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
ab_pci->irq_flags, irq_name[irq_idx],
@ -962,7 +964,7 @@ static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
PCI_EXP_LNKCTL_ASPMC);
}
static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
static void ath12k_pci_cancel_workqueue(struct ath12k_base *ab)
{
int i;
@ -972,7 +974,7 @@ static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
tasklet_kill(&ce_pipe->intr_tq);
cancel_work_sync(&ce_pipe->intr_wq);
}
}
@ -980,7 +982,7 @@ static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab)
{
ath12k_pci_ce_irqs_disable(ab);
ath12k_pci_sync_ce_irqs(ab);
ath12k_pci_kill_tasklets(ab);
ath12k_pci_cancel_workqueue(ab);
}
int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
@ -1259,6 +1261,186 @@ void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
ab_pci->pci_ops->release(ab);
}
#ifdef CONFIG_ATH12K_COREDUMP
static int ath12k_pci_coredump_calculate_size(struct ath12k_base *ab, u32 *dump_seg_sz)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
struct image_info *rddm_img, *fw_img;
struct ath12k_tlv_dump_data *dump_tlv;
enum ath12k_fw_crash_dump_type mem_type;
u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0;
struct ath12k_dump_file_data *file_data;
int i;
rddm_img = mhi_ctrl->rddm_image;
if (!rddm_img) {
ath12k_err(ab, "No RDDM dump found\n");
return 0;
}
fw_img = mhi_ctrl->fbc_image;
for (i = 0; i < fw_img->entries ; i++) {
if (!fw_img->mhi_buf[i].buf)
continue;
paging_tlv_sz += fw_img->mhi_buf[i].len;
}
dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz;
for (i = 0; i < rddm_img->entries; i++) {
if (!rddm_img->mhi_buf[i].buf)
continue;
rddm_tlv_sz += rddm_img->mhi_buf[i].len;
}
dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
mem_type = ath12k_coredump_get_dump_type(ab->qmi.target_mem[i].type);
if (mem_type == FW_CRASH_DUMP_NONE)
continue;
if (mem_type == FW_CRASH_DUMP_TYPE_MAX) {
ath12k_dbg(ab, ATH12K_DBG_PCI,
"target mem region type %d not supported",
ab->qmi.target_mem[i].type);
continue;
}
if (!ab->qmi.target_mem[i].paddr)
continue;
dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size;
}
for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) {
if (!dump_seg_sz[i])
continue;
len += sizeof(*dump_tlv) + dump_seg_sz[i];
}
if (len)
len += sizeof(*file_data);
return len;
}
static void ath12k_pci_coredump_download(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
struct image_info *rddm_img, *fw_img;
struct timespec64 timestamp;
int i, len, mem_idx;
enum ath12k_fw_crash_dump_type mem_type;
struct ath12k_dump_file_data *file_data;
struct ath12k_tlv_dump_data *dump_tlv;
size_t hdr_len = sizeof(*file_data);
void *buf;
u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = { 0 };
ath12k_mhi_coredump(mhi_ctrl, false);
len = ath12k_pci_coredump_calculate_size(ab, dump_seg_sz);
if (!len) {
ath12k_warn(ab, "No crash dump data found for devcoredump");
return;
}
rddm_img = mhi_ctrl->rddm_image;
fw_img = mhi_ctrl->fbc_image;
/* dev_coredumpv() requires vmalloc data */
buf = vzalloc(len);
if (!buf)
return;
ab->dump_data = buf;
ab->ath12k_coredump_len = len;
file_data = ab->dump_data;
strscpy(file_data->df_magic, "ATH12K-FW-DUMP", sizeof(file_data->df_magic));
file_data->len = cpu_to_le32(len);
file_data->version = cpu_to_le32(ATH12K_FW_CRASH_DUMP_V2);
file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
guid_gen(&file_data->guid);
ktime_get_real_ts64(&timestamp);
file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
buf += hdr_len;
dump_tlv = buf;
dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
buf += COREDUMP_TLV_HDR_SIZE;
/* append all segments together as they are all part of a single contiguous
* block of memory
*/
for (i = 0; i < fw_img->entries ; i++) {
if (!fw_img->mhi_buf[i].buf)
continue;
memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
fw_img->mhi_buf[i].len);
buf += fw_img->mhi_buf[i].len;
}
dump_tlv = buf;
dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
buf += COREDUMP_TLV_HDR_SIZE;
/* append all segments together as they are all part of a single contiguous
* block of memory
*/
for (i = 0; i < rddm_img->entries; i++) {
if (!rddm_img->mhi_buf[i].buf)
continue;
memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
rddm_img->mhi_buf[i].len);
buf += rddm_img->mhi_buf[i].len;
}
mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA;
for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) {
if (!dump_seg_sz[mem_idx] || mem_idx == FW_CRASH_DUMP_NONE)
continue;
dump_tlv = buf;
dump_tlv->type = cpu_to_le32(mem_idx);
dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
buf += COREDUMP_TLV_HDR_SIZE;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
mem_type = ath12k_coredump_get_dump_type
(ab->qmi.target_mem[i].type);
if (mem_type != mem_idx)
continue;
if (!ab->qmi.target_mem[i].paddr) {
ath12k_dbg(ab, ATH12K_DBG_PCI,
"Skipping mem region type %d",
ab->qmi.target_mem[i].type);
continue;
}
memcpy_fromio(buf, ab->qmi.target_mem[i].v.ioaddr,
ab->qmi.target_mem[i].size);
buf += ab->qmi.target_mem[i].size;
}
}
queue_work(ab->workqueue, &ab->dump_work);
}
#endif
int ath12k_pci_power_up(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
@ -1329,6 +1511,9 @@ static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
.panic_handler = ath12k_pci_panic_handler,
#ifdef CONFIG_ATH12K_COREDUMP
.coredump_download = ath12k_pci_coredump_download,
#endif
};
static
@ -1538,6 +1723,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
cancel_work_sync(&ab->reset_work);
cancel_work_sync(&ab->dump_work);
ath12k_core_deinit(ab);
qmi_fail:


@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@ -186,7 +186,7 @@ void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
struct ath12k_peer *peer, *tmp;
struct ath12k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
@ -235,7 +235,7 @@ int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
reinit_completion(&ar->peer_delete_done);
@ -261,14 +261,15 @@ static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8
return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}
int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_create_arg *arg)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ath12k_peer *peer;
int ret;
lockdep_assert_held(&ar->conf_mutex);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (ar->num_peers > (ar->max_num_peers - 1)) {
ath12k_warn(ar->ab,
@ -326,7 +327,7 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
peer->pdev_idx = ar->pdev_idx;
peer->sta = sta;
if (arvif->vif->type == NL80211_IFTYPE_STATION) {
if (vif->type == NL80211_IFTYPE_STATION) {
arvif->ast_hash = peer->ast_hash;
arvif->ast_idx = peer->hw_peer_id;
}


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PEER_H
@ -59,7 +59,7 @@ struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, int peer_id);
void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id);
int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr);
int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ieee80211_sta *sta,
struct ath12k_wmi_peer_create_arg *arg);
int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,


@ -684,18 +684,17 @@ enum rx_msdu_start_reception_type {
#define RX_MSDU_END_INFO5_SA_IDX_TIMEOUT BIT(0)
#define RX_MSDU_END_INFO5_DA_IDX_TIMEOUT BIT(1)
#define RX_MSDU_END_INFO5_TO_DS BIT(2)
#define RX_MSDU_END_INFO5_TID GENMASK(6, 3)
#define RX_MSDU_END_INFO5_SA_IS_VALID BIT(7)
#define RX_MSDU_END_INFO5_DA_IS_VALID BIT(8)
#define RX_MSDU_END_INFO5_DA_IS_MCBC BIT(9)
#define RX_MSDU_END_INFO5_L3_HDR_PADDING GENMASK(11, 10)
#define RX_MSDU_END_INFO5_FIRST_MSDU BIT(12)
#define RX_MSDU_END_INFO5_LAST_MSDU BIT(13)
#define RX_MSDU_END_INFO5_FROM_DS BIT(14)
#define RX_MSDU_END_INFO5_IP_CHKSUM_FAIL_COPY BIT(15)
#define RX_MSDU_END_QCN9274_INFO5_TO_DS BIT(2)
#define RX_MSDU_END_QCN9274_INFO5_TID GENMASK(6, 3)
#define RX_MSDU_END_QCN9274_INFO5_FROM_DS BIT(14)
#define RX_MSDU_END_INFO6_MSDU_DROP BIT(0)
#define RX_MSDU_END_INFO6_REO_DEST_IND GENMASK(5, 1)
#define RX_MSDU_END_INFO6_FLOW_IDX GENMASK(25, 6)
@ -709,14 +708,14 @@ enum rx_msdu_start_reception_type {
#define RX_MSDU_END_INFO7_FLOW_AGGR_CONTN BIT(8)
#define RX_MSDU_END_INFO7_FISA_TIMEOUT BIT(9)
#define RX_MSDU_END_QCN9274_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10)
#define RX_MSDU_END_QCN9274_INFO7_MSDU_LIMIT_ERROR BIT(11)
#define RX_MSDU_END_QCN9274_INFO7_FLOW_IDX_TIMEOUT BIT(12)
#define RX_MSDU_END_QCN9274_INFO7_FLOW_IDX_INVALID BIT(13)
#define RX_MSDU_END_QCN9274_INFO7_CCE_MATCH BIT(14)
#define RX_MSDU_END_QCN9274_INFO7_AMSDU_PARSER_ERR BIT(15)
#define RX_MSDU_END_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10)
#define RX_MSDU_END_INFO7_MSDU_LIMIT_ERROR BIT(11)
#define RX_MSDU_END_INFO7_FLOW_IDX_TIMEOUT BIT(12)
#define RX_MSDU_END_INFO7_FLOW_IDX_INVALID BIT(13)
#define RX_MSDU_END_INFO7_CCE_MATCH BIT(14)
#define RX_MSDU_END_INFO7_AMSDU_PARSER_ERR BIT(15)
#define RX_MSDU_END_QCN9274_INFO8_KEY_ID GENMASK(7, 0)
#define RX_MSDU_END_INFO8_KEY_ID GENMASK(7, 0)
#define RX_MSDU_END_INFO9_SERVICE_CODE GENMASK(14, 6)
#define RX_MSDU_END_INFO9_PRIORITY_VALID BIT(15)
@ -758,8 +757,8 @@ enum rx_msdu_start_reception_type {
#define RX_MSDU_END_INFO12_RECV_BW GENMASK(20, 18)
#define RX_MSDU_END_INFO12_RECEPTION_TYPE GENMASK(23, 21)
#define RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP GENMASK(30, 24)
#define RX_MSDU_END_QCN9274_INFO12_MIMO_DONE_COPY BIT(31)
#define RX_MSDU_END_INFO12_MIMO_SS_BITMAP GENMASK(30, 24)
#define RX_MSDU_END_INFO12_MIMO_DONE_COPY BIT(31)
#define RX_MSDU_END_INFO13_FIRST_MPDU BIT(0)
#define RX_MSDU_END_INFO13_MCAST_BCAST BIT(2)
@ -791,7 +790,7 @@ enum rx_msdu_start_reception_type {
#define RX_MSDU_END_INFO13_UNDECRYPT_FRAME_ERR BIT(30)
#define RX_MSDU_END_INFO13_FCS_ERR BIT(31)
#define RX_MSDU_END_QCN9274_INFO13_WIFI_PARSER_ERR BIT(15)
#define RX_MSDU_END_INFO13_WIFI_PARSER_ERR BIT(15)
#define RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE GENMASK(12, 10)
#define RX_MSDU_END_INFO14_RX_BITMAP_NOT_UPDED BIT(13)
@ -889,65 +888,6 @@ struct rx_msdu_end_qcn9274_compact {
__le32 info14;
} __packed;
/* These macro definitions are only used for WCN7850 */
#define RX_MSDU_END_WCN7850_INFO2_KEY_ID BIT(7, 0)
#define RX_MSDU_END_WCN7850_INFO5_MSDU_LIMIT_ERR BIT(2)
#define RX_MSDU_END_WCN7850_INFO5_IDX_TIMEOUT BIT(3)
#define RX_MSDU_END_WCN7850_INFO5_IDX_INVALID BIT(4)
#define RX_MSDU_END_WCN7850_INFO5_WIFI_PARSE_ERR BIT(5)
#define RX_MSDU_END_WCN7850_INFO5_AMSDU_PARSER_ERR BIT(6)
#define RX_MSDU_END_WCN7850_INFO5_TCPUDP_CSUM_FAIL_CPY BIT(14)
#define RX_MSDU_END_WCN7850_INFO12_MIMO_SS_BITMAP GENMASK(31, 24)
#define RX_MSDU_END_WCN7850_INFO13_FRAGMENT_FLAG BIT(13)
#define RX_MSDU_END_WCN7850_INFO13_CCE_MATCH BIT(15)
struct rx_msdu_end_wcn7850 {
__le16 info0;
__le16 phy_ppdu_id;
__le16 ip_hdr_cksum;
__le16 info1;
__le16 info2;
__le16 cumulative_l3_checksum;
__le32 rule_indication0;
__le32 rule_indication1;
__le16 info3;
__le16 l3_type;
__le32 ipv6_options_crc;
__le32 tcp_seq_num;
__le32 tcp_ack_num;
__le16 info4;
__le16 window_size;
__le16 tcp_udp_chksum;
__le16 info5;
__le16 sa_idx;
__le16 da_idx_or_sw_peer_id;
__le32 info6;
__le32 fse_metadata;
__le16 cce_metadata;
__le16 sa_sw_peer_id;
__le16 info7;
__le16 rsvd0;
__le16 cumulative_l4_checksum;
__le16 cumulative_ip_length;
__le32 info9;
__le32 info10;
__le32 info11;
__le32 toeplitz_hash_2_or_4;
__le32 flow_id_toeplitz;
__le32 info12;
__le32 ppdu_start_timestamp_31_0;
__le32 ppdu_start_timestamp_63_32;
__le32 phy_meta_data;
__le16 vlan_ctag_ci;
__le16 vlan_stag_ci;
__le32 rsvd[3];
__le32 info13;
__le32 info14;
} __packed;
/* rx_msdu_end
*
* rxpcu_mpdu_filter_in_category
@ -1578,7 +1518,7 @@ struct rx_pkt_hdr_tlv {
struct hal_rx_desc_wcn7850 {
__le64 msdu_end_tag;
struct rx_msdu_end_wcn7850 msdu_end;
struct rx_msdu_end_qcn9274 msdu_end;
u8 rx_padding0[RX_BE_PADDING0_BYTES];
__le64 mpdu_start_tag;
struct rx_mpdu_start_qcn9274 mpdu_start;


@ -6687,7 +6687,8 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
const u32 *vdev_ids)
{
int i;
struct ath12k_vif *arvif;
struct ath12k_link_vif *arvif;
struct ath12k_vif *ahvif;
/* Finish CSA once the switch count becomes NULL */
if (ev->current_switch_count)
@ -6702,9 +6703,10 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
vdev_ids[i]);
continue;
}
ahvif = arvif->ahvif;
if (arvif->is_up && arvif->vif->bss_conf.csa_active)
ieee80211_csa_finish(arvif->vif, 0);
if (arvif->is_up && ahvif->vif->bss_conf.csa_active)
ieee80211_csa_finish(ahvif->vif, 0);
}
rcu_read_unlock();
}
@ -7098,7 +7100,7 @@ static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
const struct wmi_gtk_offload_status_event *ev;
struct ath12k_vif *arvif;
struct ath12k_link_vif *arvif;
__be64 replay_ctr_be;
u64 replay_ctr;
const void **tb;
@ -7136,7 +7138,7 @@ static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
/* supplicant expects big-endian replay counter */
replay_ctr_be = cpu_to_be64(replay_ctr);
ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
(void *)&replay_ctr_be, GFP_ATOMIC);
rcu_read_unlock();
@ -7284,9 +7286,11 @@ static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
u32 pdev_idx)
{
int status;
u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
static const u32 svc_id[] = {
ATH12K_HTC_SVC_ID_WMI_CONTROL,
ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2
};
struct ath12k_htc_svc_conn_req conn_req = {};
struct ath12k_htc_svc_conn_resp conn_resp = {};
@ -7372,13 +7376,13 @@ ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
int ath12k_wmi_simulate_radar(struct ath12k *ar)
{
struct ath12k_vif *arvif;
struct ath12k_link_vif *arvif;
u32 dfs_args[DFS_MAX_TEST_ARGS];
struct wmi_unit_test_cmd wmi_ut;
bool arvif_found = false;
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
arvif_found = true;
break;
}
@ -7940,7 +7944,7 @@ static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
}
int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
struct ath12k_vif *arvif,
struct ath12k_link_vif *arvif,
struct wmi_arp_ns_offload_arg *offload,
bool enable)
{
@ -7989,7 +7993,7 @@ int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
}
int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
struct ath12k_vif *arvif, bool enable)
struct ath12k_link_vif *arvif, bool enable)
{
struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
struct wmi_gtk_rekey_offload_cmd *cmd;
@ -8026,7 +8030,7 @@ int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
}
int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
struct ath12k_vif *arvif)
struct ath12k_link_vif *arvif)
{
struct wmi_gtk_rekey_offload_cmd *cmd;
struct sk_buff *skb;


@ -24,7 +24,7 @@
struct ath12k_base;
struct ath12k;
struct ath12k_vif;
struct ath12k_link_vif;
/* There is no signed version of __le32, so for a temporary solution come
* up with our own version. The idea is from fs/ntfs/endian.h.
@ -5627,13 +5627,13 @@ int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar,
struct wmi_hw_data_filter_arg *arg);
int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
struct ath12k_vif *arvif,
struct ath12k_link_vif *arvif,
struct wmi_arp_ns_offload_arg *offload,
bool enable);
int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
struct ath12k_vif *arvif, bool enable);
struct ath12k_link_vif *arvif, bool enable);
int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
struct ath12k_vif *arvif);
struct ath12k_link_vif *arvif);
int ath12k_wmi_sta_keepalive(struct ath12k *ar,
const struct wmi_sta_keepalive_arg *arg);


@ -29,11 +29,11 @@ static const struct wiphy_wowlan_support ath12k_wowlan_support = {
.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};
static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *arvif)
static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *ahvif)
{
return (arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE ||
arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT ||
arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO);
return (ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE ||
ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT ||
ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO);
}
int ath12k_wow_enable(struct ath12k *ar)
@ -101,7 +101,7 @@ int ath12k_wow_wakeup(struct ath12k *ar)
return 0;
}
static int ath12k_wow_vif_cleanup(struct ath12k_vif *arvif)
static int ath12k_wow_vif_cleanup(struct ath12k_link_vif *arvif)
{
struct ath12k *ar = arvif->ar;
int i, ret;
@ -129,10 +129,10 @@ static int ath12k_wow_vif_cleanup(struct ath12k_vif *arvif)
static int ath12k_wow_cleanup(struct ath12k *ar)
{
struct ath12k_vif *arvif;
struct ath12k_link_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath12k_wow_vif_cleanup(arvif);
@ -191,7 +191,7 @@ ath12k_wow_convert_8023_to_80211(struct ath12k *ar,
memcpy(bytemask, eth_bytemask, eth_pat_len);
pat_len = eth_pat_len;
} else if (eth_pkt_ofs + eth_pat_len < prot_ofs) {
} else if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);
@ -354,7 +354,7 @@ ath12k_wow_pno_check_and_convert(struct ath12k *ar, u32 vdev_id,
return 0;
}
static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif,
static int ath12k_wow_vif_set_wakeups(struct ath12k_link_vif *arvif,
struct cfg80211_wowlan *wowlan)
{
const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
@ -364,7 +364,7 @@ static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif,
int ret, i, j;
/* Setup requested WOW features */
switch (arvif->vdev_type) {
switch (arvif->ahvif->vdev_type) {
case WMI_VDEV_TYPE_IBSS:
__set_bit(WOW_BEACON_EVENT, &wow_mask);
fallthrough;
@ -473,13 +473,13 @@ static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif,
static int ath12k_wow_set_wakeups(struct ath12k *ar,
struct cfg80211_wowlan *wowlan)
{
struct ath12k_vif *arvif;
struct ath12k_link_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (ath12k_wow_is_p2p_vdev(arvif))
if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
continue;
ret = ath12k_wow_vif_set_wakeups(arvif, wowlan);
if (ret) {
@ -518,11 +518,11 @@ static int ath12k_wow_vdev_clean_nlo(struct ath12k *ar, u32 vdev_id)
return ret;
}
static int ath12k_wow_vif_clean_nlo(struct ath12k_vif *arvif)
static int ath12k_wow_vif_clean_nlo(struct ath12k_link_vif *arvif)
{
struct ath12k *ar = arvif->ar;
switch (arvif->vdev_type) {
switch (arvif->ahvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id);
default:
@ -532,13 +532,13 @@ static int ath12k_wow_vif_clean_nlo(struct ath12k_vif *arvif)
static int ath12k_wow_nlo_cleanup(struct ath12k *ar)
{
struct ath12k_vif *arvif;
struct ath12k_link_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (ath12k_wow_is_p2p_vdev(arvif))
if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
continue;
ret = ath12k_wow_vif_clean_nlo(arvif);
@ -555,13 +555,13 @@ static int ath12k_wow_nlo_cleanup(struct ath12k *ar)
static int ath12k_wow_set_hw_filter(struct ath12k *ar)
{
struct wmi_hw_data_filter_arg arg;
struct ath12k_vif *arvif;
struct ath12k_link_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
continue;
arg.vdev_id = arvif->vdev_id;
@ -581,13 +581,13 @@ static int ath12k_wow_set_hw_filter(struct ath12k *ar)
static int ath12k_wow_clear_hw_filter(struct ath12k *ar)
{
struct wmi_hw_data_filter_arg arg;
struct ath12k_vif *arvif;
struct ath12k_link_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
continue;
arg.vdev_id = arvif->vdev_id;
@ -626,10 +626,10 @@ static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab,
}
}
static void ath12k_wow_prepare_ns_offload(struct ath12k_vif *arvif,
static void ath12k_wow_prepare_ns_offload(struct ath12k_link_vif *arvif,
struct wmi_arp_ns_offload_arg *offload)
{
struct net_device *ndev = ieee80211_vif_to_wdev(arvif->vif)->netdev;
struct net_device *ndev = ieee80211_vif_to_wdev(arvif->ahvif->vif)->netdev;
struct ath12k_base *ab = arvif->ar->ab;
struct inet6_ifaddr *ifa6;
struct ifacaddr6 *ifaca6;
@ -710,10 +710,10 @@ static void ath12k_wow_prepare_ns_offload(struct ath12k_vif *arvif,
ath12k_wow_generate_ns_mc_addr(ab, offload);
}
static void ath12k_wow_prepare_arp_offload(struct ath12k_vif *arvif,
static void ath12k_wow_prepare_arp_offload(struct ath12k_link_vif *arvif,
struct wmi_arp_ns_offload_arg *offload)
{
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_vif *vif = arvif->ahvif->vif;
struct ieee80211_vif_cfg vif_cfg = vif->cfg;
struct ath12k_base *ab = arvif->ar->ab;
u32 ipv4_cnt;
@ -732,22 +732,25 @@ static void ath12k_wow_prepare_arp_offload(struct ath12k_vif *arvif,
static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable)
{
struct wmi_arp_ns_offload_arg *offload;
struct ath12k_vif *arvif;
struct ath12k_link_vif *arvif;
struct ath12k_vif *ahvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
offload = kmalloc(sizeof(*offload), GFP_KERNEL);
if (!offload)
return -ENOMEM;
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
ahvif = arvif->ahvif;
if (ahvif->vdev_type != WMI_VDEV_TYPE_STA)
continue;
memset(offload, 0, sizeof(*offload));
memcpy(offload->mac_addr, arvif->vif->addr, ETH_ALEN);
memcpy(offload->mac_addr, ahvif->vif->addr, ETH_ALEN);
ath12k_wow_prepare_ns_offload(arvif, offload);
ath12k_wow_prepare_arp_offload(arvif, offload);
@ -766,13 +769,13 @@ static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable)
static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable)
{
struct ath12k_vif *arvif;
struct ath12k_link_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA ||
!arvif->is_up ||
!arvif->rekey_data.enable_offload)
continue;
@ -824,10 +827,10 @@ static int ath12k_wow_set_keepalive(struct ath12k *ar,
enum wmi_sta_keepalive_method method,
u32 interval)
{
struct ath12k_vif *arvif;
struct ath12k_link_vif *arvif;
int ret;
lockdep_assert_held(&ar->conf_mutex);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath12k_mac_vif_set_keepalive(arvif, method, interval);
@ -845,7 +848,7 @@ int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
int ret;
mutex_lock(&ar->conf_mutex);
lockdep_assert_wiphy(hw->wiphy);
ret = ath12k_wow_cleanup(ar);
if (ret) {
@ -913,7 +916,6 @@ int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
ath12k_wow_cleanup(ar);
exit:
mutex_unlock(&ar->conf_mutex);
return ret ? 1 : 0;
}
@ -922,9 +924,9 @@ void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
mutex_lock(&ar->conf_mutex);
lockdep_assert_wiphy(hw->wiphy);
device_set_wakeup_enable(ar->ab->dev, enabled);
mutex_unlock(&ar->conf_mutex);
}
int ath12k_wow_op_resume(struct ieee80211_hw *hw)
@ -933,7 +935,7 @@ int ath12k_wow_op_resume(struct ieee80211_hw *hw)
struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
int ret;
mutex_lock(&ar->conf_mutex);
lockdep_assert_wiphy(hw->wiphy);
ret = ath12k_hif_resume(ar->ab);
if (ret) {
@ -995,7 +997,6 @@ int ath12k_wow_op_resume(struct ieee80211_hw *hw)
}
}
mutex_unlock(&ar->conf_mutex);
return ret;
}


@ -218,10 +218,10 @@ static void ath_ahb_remove(struct platform_device *pdev)
}
static struct platform_driver ath_ahb_driver = {
.probe = ath_ahb_probe,
.remove_new = ath_ahb_remove,
.driver = {
.name = "ar231x-wmac",
.probe = ath_ahb_probe,
.remove = ath_ahb_remove,
.driver = {
.name = "ar231x-wmac",
},
};


@ -46,6 +46,8 @@ static const struct pci_device_id ath5k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
{ PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
{ PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
{ PCI_VDEVICE(ATHEROS, 0xff16) }, /* Gigaset SX76[23] AR241[34]A */
{ PCI_VDEVICE(ATHEROS, 0xff1a) }, /* Arcadyan ARV45XX AR2417 */
{ PCI_VDEVICE(ATHEROS, 0xff1b) }, /* AR5BXB63 */
{ 0 }
};


@ -1249,7 +1249,7 @@ struct wmi_rssi_threshold_params_cmd {
/* highest of upper */
a_sle16 thresh_above6_val;
/* lowest of bellow */
/* lowest of below */
a_sle16 thresh_below1_val;
a_sle16 thresh_below2_val;
@ -1257,7 +1257,7 @@ struct wmi_rssi_threshold_params_cmd {
a_sle16 thresh_below4_val;
a_sle16 thresh_below5_val;
/* highest of bellow */
/* highest of below */
a_sle16 thresh_below6_val;
/* "alpha" */
@ -1287,13 +1287,13 @@ struct wmi_snr_threshold_params_cmd {
/* highest of upper */
u8 thresh_above4_val;
/* lowest of bellow */
/* lowest of below */
u8 thresh_below1_val;
u8 thresh_below2_val;
u8 thresh_below3_val;
/* highest of bellow */
/* highest of below */
u8 thresh_below4_val;
u8 reserved[3];


@ -158,12 +158,12 @@ static void ath_ahb_remove(struct platform_device *pdev)
}
static struct platform_driver ath_ahb_driver = {
.probe = ath_ahb_probe,
.remove_new = ath_ahb_remove,
.driver = {
.name = "ath9k",
.probe = ath_ahb_probe,
.remove = ath_ahb_remove,
.driver = {
.name = "ath9k",
},
.id_table = ath9k_platform_id_table,
.id_table = ath9k_platform_id_table,
};
MODULE_DEVICE_TABLE(platform, ath9k_platform_id_table);


@ -409,13 +409,11 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
sram.valid = true;
sram.rot_dir_att_db =
min(max(rot_dir_path_att_db,
(int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB),
ATH_AIC_MAX_ROT_DIR_ATT_DB);
clamp(rot_dir_path_att_db, (int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB,
ATH_AIC_MAX_ROT_DIR_ATT_DB);
sram.rot_quad_att_db =
min(max(rot_quad_path_att_db,
(int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB),
ATH_AIC_MAX_ROT_QUAD_ATT_DB);
clamp(rot_quad_path_att_db, (int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB,
ATH_AIC_MAX_ROT_QUAD_ATT_DB);
aic->aic_sram[i] = (SM(sram.vga_dir_sign,
AR_PHY_AIC_SRAM_VGA_DIR_SIGN) |


@ -18,7 +18,6 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/ath9k_platform.h>
#include <linux/nvmem-consumer.h>
#include <linux/workqueue.h>


@ -16,7 +16,6 @@
#include <linux/export.h>
#include <linux/types.h>
#include <linux/ath9k_platform.h>
#include "hw.h"
enum ath_bt_mode {
@ -115,23 +114,14 @@ static void ath9k_hw_btcoex_pin_init(struct ath_hw *ah, u8 wlanactive_gpio,
u8 btactive_gpio, u8 btpriority_gpio)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
struct ath9k_platform_data *pdata = ah->dev->platform_data;
if (btcoex_hw->scheme != ATH_BTCOEX_CFG_2WIRE &&
btcoex_hw->scheme != ATH_BTCOEX_CFG_3WIRE)
return;
/* bt priority GPIO will be ignored by 2 wire scheme */
if (pdata && (pdata->bt_active_pin || pdata->bt_priority_pin ||
pdata->wlan_active_pin)) {
btcoex_hw->btactive_gpio = pdata->bt_active_pin;
btcoex_hw->wlanactive_gpio = pdata->wlan_active_pin;
btcoex_hw->btpriority_gpio = pdata->bt_priority_pin;
} else {
btcoex_hw->btactive_gpio = btactive_gpio;
btcoex_hw->wlanactive_gpio = wlanactive_gpio;
btcoex_hw->btpriority_gpio = btpriority_gpio;
}
btcoex_hw->btactive_gpio = btactive_gpio;
btcoex_hw->wlanactive_gpio = wlanactive_gpio;
btcoex_hw->btpriority_gpio = btpriority_gpio;
}
void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)


@ -15,7 +15,6 @@
*/
#include "hw.h"
#include <linux/ath9k_platform.h>
void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
{
@ -119,14 +118,6 @@ static bool ath9k_hw_nvram_read_array(u16 *blob, size_t blob_size,
return true;
}
static bool ath9k_hw_nvram_read_pdata(struct ath9k_platform_data *pdata,
off_t offset, u16 *data)
{
return ath9k_hw_nvram_read_array(pdata->eeprom_data,
ARRAY_SIZE(pdata->eeprom_data),
offset, data);
}
static bool ath9k_hw_nvram_read_firmware(const struct firmware *eeprom_blob,
off_t offset, u16 *data)
{
@ -146,15 +137,12 @@ static bool ath9k_hw_nvram_read_nvmem(struct ath_hw *ah, off_t offset,
bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_platform_data *pdata = ah->dev->platform_data;
bool ret;
if (ah->nvmem_blob)
ret = ath9k_hw_nvram_read_nvmem(ah, off, data);
else if (ah->eeprom_blob)
ret = ath9k_hw_nvram_read_firmware(ah->eeprom_blob, off, data);
else if (pdata && !pdata->use_eeprom)
ret = ath9k_hw_nvram_read_pdata(pdata, off, data);
else
ret = common->bus_ops->eeprom_read(common, off, data);


@ -294,6 +294,9 @@ int htc_connect_service(struct htc_target *target,
return -ETIMEDOUT;
}
if (target->conn_rsp_epid < 0 || target->conn_rsp_epid >= ENDPOINT_MAX)
return -EINVAL;
*conn_rsp_epid = target->conn_rsp_epid;
return 0;
err:


@ -490,7 +490,7 @@ static void ath9k_hw_init_macaddr(struct ath_hw *ah)
u16 eeval;
static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
/* MAC address may already be loaded via ath9k_platform_data */
/* MAC address may already be loaded via NVMEM */
if (is_valid_ether_addr(common->macaddr))
return;


@ -18,7 +18,6 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
@ -583,8 +582,8 @@ static int ath9k_nvmem_request_eeprom(struct ath_softc *sc)
/* nvmem cell might not be defined, or the nvmem
* subsystem isn't included. In this case, follow
* the established "just return 0;" convention of
* ath9k_init_platform to say:
* the established "just return 0;" convention
* to say:
* "All good. Nothing to see here. Please go on."
*/
if (err == -ENOENT || err == -EOPNOTSUPP)
@ -620,49 +619,6 @@ static int ath9k_nvmem_request_eeprom(struct ath_softc *sc)
return 0;
}
static int ath9k_init_platform(struct ath_softc *sc)
{
struct ath9k_platform_data *pdata = sc->dev->platform_data;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
int ret;
if (!pdata)
return 0;
if (!pdata->use_eeprom) {
ah->ah_flags &= ~AH_USE_EEPROM;
ah->gpio_mask = pdata->gpio_mask;
ah->gpio_val = pdata->gpio_val;
ah->led_pin = pdata->led_pin;
ah->is_clk_25mhz = pdata->is_clk_25mhz;
ah->get_mac_revision = pdata->get_mac_revision;
ah->external_reset = pdata->external_reset;
ah->disable_2ghz = pdata->disable_2ghz;
ah->disable_5ghz = pdata->disable_5ghz;
if (!pdata->endian_check)
ah->ah_flags |= AH_NO_EEP_SWAP;
}
if (pdata->eeprom_name) {
ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
if (ret)
return ret;
}
if (pdata->led_active_high)
ah->config.led_active_high = true;
if (pdata->tx_gain_buffalo)
ah->config.tx_gain_buffalo = true;
if (pdata->macaddr)
ether_addr_copy(common->macaddr, pdata->macaddr);
return 0;
}
static int ath9k_of_init(struct ath_softc *sc)
{
struct device_node *np = sc->dev->of_node;
@ -748,10 +704,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
*/
ath9k_init_pcoem_platform(sc);
ret = ath9k_init_platform(sc);
if (ret)
return ret;
ret = ath9k_of_init(sc);
if (ret)
return ret;


@ -1679,10 +1679,10 @@ static const struct of_device_id wcn36xx_of_match[] = {
MODULE_DEVICE_TABLE(of, wcn36xx_of_match);
static struct platform_driver wcn36xx_driver = {
.probe = wcn36xx_probe,
.remove_new = wcn36xx_remove,
.driver = {
.name = "wcn36xx",
.probe = wcn36xx_probe,
.remove = wcn36xx_remove,
.driver = {
.name = "wcn36xx",
.of_match_table = wcn36xx_of_match,
},
};


@ -167,7 +167,7 @@ struct wcn36xx_vif {
* @dpu_desc_index: DPU descriptor index is returned from HW after config_sta
* call and is used in TX BD.
* @bss_sta_index: STA index is returned from HW after config_bss call and is
* used in both SMD channel and TX BD. See table bellow when it is used.
* used in both SMD channel and TX BD. See table below when it is used.
* @bss_dpu_desc_index: DPU descriptor index is returned from HW after
* config_bss call and is used in TX BD.
* ______________________________________________


@ -770,7 +770,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
sdiodev->settings->bus.sdio.txglomsz);
nents += (nents >> 4) + 1;
nents *= 2;
WARN_ON(nents > sdiodev->max_segment_count);


@ -594,7 +594,7 @@ static void brcmf_common_pd_remove(struct platform_device *pdev)
}
static struct platform_driver brcmf_pd = {
.remove_new = brcmf_common_pd_remove,
.remove = brcmf_common_pd_remove,
.driver = {
.name = BRCMFMAC_PDATA_NAME,
}


@ -112,9 +112,8 @@ int brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
}
strreplace(board_type, '/', '-');
settings->board_type = board_type;
-of_node_put(root);
}
+of_node_put(root);
clk = devm_clk_get_optional_enabled_with_rate(dev, "lpo", 32768);
if (IS_ERR(clk))
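This hunk moves of_node_put() past the closing brace so the reference taken on the root node is released on every path, not only when a board type was derived. A rough sketch of the pattern with a placeholder property lookup (not brcmfmac's actual code):

  struct device_node *root = of_find_node_by_path("/");
  const char *compat;

  if (root) {
          /* use the node while the reference is held */
          of_property_read_string(root, "compatible", &compat);
  }
  /* of_node_put() accepts NULL, so dropping the reference unconditionally
   * here covers both the success and the lookup-failure paths */
  of_node_put(root);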


@ -1426,15 +1426,6 @@ int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
return -ENOSPC;
}
-void dma_txflush(struct dma_pub *pub)
-{
-struct dma_info *di = container_of(pub, struct dma_info, dma);
-struct brcms_ampdu_session *session = &di->ampdu_session;
-if (!skb_queue_empty(&session->skb_list))
-ampdu_finalize(di);
-}
int dma_txpending(struct dma_pub *pub)
{
struct dma_info *di = container_of(pub, struct dma_info, dma);


@ -88,7 +88,6 @@ bool dma_txreset(struct dma_pub *pub);
void dma_txinit(struct dma_pub *pub);
int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
struct sk_buff *p0);
-void dma_txflush(struct dma_pub *pub);
int dma_txpending(struct dma_pub *pub);
void dma_kick_tx(struct dma_pub *pub);
void dma_txsuspend(struct dma_pub *pub);


@ -867,8 +867,8 @@ void libipw_rx_any(struct libipw_device *ieee,
switch (ieee->iw_mode) {
case IW_MODE_ADHOC:
/* our BSS and not from/to DS */
-if (ether_addr_equal(hdr->addr3, ieee->bssid))
-if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == 0) {
+if (ether_addr_equal(hdr->addr3, ieee->bssid) &&
+((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == 0)) {
/* promisc: get all */
if (ieee->dev->flags & IFF_PROMISC)
is_packet_for_us = 1;
@ -882,8 +882,8 @@ void libipw_rx_any(struct libipw_device *ieee,
break;
case IW_MODE_INFRA:
/* our BSS (== from our AP) and from DS */
-if (ether_addr_equal(hdr->addr2, ieee->bssid))
-if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS) {
+if (ether_addr_equal(hdr->addr2, ieee->bssid) &&
+((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS)) {
/* promisc: get all */
if (ieee->dev->flags & IFF_PROMISC)
is_packet_for_us = 1;
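Both hunks fold a pair of stacked if statements into a single condition. The old form already behaved like an AND, because the second if was the entire body of the first, but the braces and indentation belonged only to the inner test, which is what static checkers flag as misleading. Schematically, with placeholder predicates:

  /* before: the second if is the whole body of the first */
  if (cond_a())
  if (cond_b()) {
          do_work();
  }

  /* after: identical behaviour, intent made explicit */
  if (cond_a() && cond_b()) {
          do_work();
  }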


@ -566,7 +566,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
-rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
+return;
}
/* Convert 3945's rssi indicator to dBm */


@ -664,7 +664,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
-rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
+return;
}
/* This will be used in several places later */


@ -13,7 +13,7 @@
#define IWL_BZ_UCODE_API_MAX 94
/* Lowest firmware API version supported */
-#define IWL_BZ_UCODE_API_MIN 90
+#define IWL_BZ_UCODE_API_MIN 92
/* NVM versions */
#define IWL_BZ_NVM_VERSION 0x0a1d


@ -13,7 +13,7 @@
#define IWL_SC_UCODE_API_MAX 94
/* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN 90
+#define IWL_SC_UCODE_API_MIN 92
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d


@ -56,8 +56,6 @@ struct iwl_binding_cmd {
} __packed; /* BINDING_CMD_API_S_VER_2 */
#define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1)
-#define IWL_LMAC_24G_INDEX 0
-#define IWL_LMAC_5G_INDEX 1
/* The maximal number of fragments in the FW's schedule session */
#define IWL_MVM_MAX_QUOTA 128


@ -40,4 +40,7 @@ enum iwl_ctxt_action {
FW_CTXT_ACTION_REMOVE,
}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */
+#define IWL_LMAC_24G_INDEX 0
+#define IWL_LMAC_5G_INDEX 1
#endif /* __iwl_fw_api_context_h__ */


@ -368,7 +368,7 @@ enum iwl_wowlan_flags {
};
/**
-* struct iwl_wowlan_config_cmd - WoWLAN configuration (versions 5 and 6)
+* struct iwl_wowlan_config_cmd_v6 - WoWLAN configuration (versions 5 and 6)
* @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters
* @non_qos_seq: non-QoS sequence counter to use next.
* Reserved if the struct has version >= 6.
@ -380,7 +380,7 @@ enum iwl_wowlan_flags {
* @sta_id: station ID for wowlan.
* @reserved: reserved
*/
-struct iwl_wowlan_config_cmd {
+struct iwl_wowlan_config_cmd_v6 {
__le32 wakeup_filter;
__le16 non_qos_seq;
__le16 qos_seq[8];
@ -390,7 +390,27 @@ struct iwl_wowlan_config_cmd {
u8 flags;
u8 sta_id;
u8 reserved;
-} __packed; /* WOWLAN_CONFIG_API_S_VER_5 */
+} __packed; /* WOWLAN_CONFIG_API_S_VER_6 */
/**
* struct iwl_wowlan_config_cmd - WoWLAN configuration
* @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters
* @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down
* @is_11n_connection: indicates HT connection
* @offloading_tid: TID reserved for firmware use
* @flags: extra flags, see &enum iwl_wowlan_flags
* @sta_id: station ID for wowlan.
* @reserved: reserved
*/
struct iwl_wowlan_config_cmd {
__le32 wakeup_filter;
u8 wowlan_ba_teardown_tids;
u8 is_11n_connection;
u8 offloading_tid;
u8 flags;
u8 sta_id;
u8 reserved[3];
} __packed; /* WOWLAN_CONFIG_API_S_VER_7 */
#define IWL_NUM_RSC 16
#define WOWLAN_KEY_MAX_SIZE 32
@ -890,7 +910,7 @@ struct iwl_wowlan_mlo_gtk {
} __packed; /* WOWLAN_MLO_GTK_KEY_API_S_VER_1 */
/**
-* struct iwl_wowlan_info_notif - WoWLAN information notification
+* struct iwl_wowlan_info_notif_v4 - WoWLAN information notification
* @gtk: GTK data
* @igtk: IGTK data
* @bigtk: BIGTK data
@ -910,7 +930,7 @@ struct iwl_wowlan_mlo_gtk {
* @reserved2: reserved
* @mlo_gtks: array of GTKs of size num_mlo_link_keys for version >= 4
*/
-struct iwl_wowlan_info_notif {
+struct iwl_wowlan_info_notif_v4 {
struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM];
@ -929,6 +949,45 @@ struct iwl_wowlan_info_notif {
struct iwl_wowlan_mlo_gtk mlo_gtks[];
} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3, _VER_4 */
/**
* struct iwl_wowlan_info_notif - WoWLAN information notification
* @gtk: GTK data
* @igtk: IGTK data
* @bigtk: BIGTK data
* @replay_ctr: GTK rekey replay counter
* @pattern_number: number of the matched patterns
* @qos_seq_ctr: QoS sequence counters to use next
* @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
* @num_of_gtk_rekeys: number of GTK rekeys
* @transmitted_ndps: number of transmitted neighbor discovery packets
* @received_beacons: number of received beacons
* @tid_tear_down: bit mask of tids whose BA sessions were closed
* in suspend state
* @station_id: station id
* @num_mlo_link_keys: number of &struct iwl_wowlan_mlo_gtk structs
* following this notif
* @tid_offloaded_tx: tid used by the firmware to transmit data packets
* while in wowlan
* @mlo_gtks: array of GTKs of size num_mlo_link_keys
*/
struct iwl_wowlan_info_notif {
struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 qos_seq_ctr;
__le32 wakeup_reasons;
__le32 num_of_gtk_rekeys;
__le32 transmitted_ndps;
__le32 received_beacons;
u8 tid_tear_down;
u8 station_id;
u8 num_mlo_link_keys;
u8 tid_offloaded_tx;
struct iwl_wowlan_mlo_gtk mlo_gtks[];
} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_5 */
/**
* struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification
* @wake_packet_length: wakeup packet length


@ -616,6 +616,9 @@ struct iwl_tof_range_req_ap_entry_v2 {
* continue with the session and will provide the LMR feedback.
* @IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC: send an incorrect SAC in the
* first NDP exchange. This is used for testing.
+* @IWL_INITIATOR_AP_FLAGS_TEST_BAD_SLTF: use incorrect secure LTF tx key. This
+* is used for testing. Only supported from version 15 of the range request
+* command.
*/
enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1),
@ -633,6 +636,7 @@ enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_PMF = BIT(14),
IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = BIT(15),
IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC = BIT(16),
+IWL_INITIATOR_AP_FLAGS_TEST_BAD_SLTF = BIT(17),
};
/**
@ -767,7 +771,7 @@ enum iwl_location_cipher {
* @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
* the number of measurement iterations (min 2^0 = 1, max 2^14)
* @sta_id: the station id of the AP. Only relevant when associated to the AP,
-* otherwise should be set to &IWL_MVM_INVALID_STA.
+* otherwise should be set to &IWL_INVALID_STA.
* @cipher: pairwise cipher suite for secured measurement.
* &enum iwl_location_cipher.
* @hltk: HLTK to be used for secured 11az measurement
@ -814,7 +818,7 @@ struct iwl_tof_range_req_ap_entry_v6 {
* @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
* the number of measurement iterations (min 2^0 = 1, max 2^14)
* @sta_id: the station id of the AP. Only relevant when associated to the AP,
-* otherwise should be set to &IWL_MVM_INVALID_STA.
+* otherwise should be set to &IWL_INVALID_STA.
* @cipher: pairwise cipher suite for secured measurement.
* &enum iwl_location_cipher.
* @hltk: HLTK to be used for secured 11az measurement
@ -827,10 +831,10 @@ struct iwl_tof_range_req_ap_entry_v6 {
* &IWL_INITIATOR_AP_FLAGS_TB is set.
* @rx_pn: the next expected PN for protected management frames Rx. LE byte
* order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
-* is set to &IWL_MVM_INVALID_STA.
+* is set to &IWL_INVALID_STA.
* @tx_pn: the next PN to use for protected management frames Tx. LE byte
* order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
-* is set to &IWL_MVM_INVALID_STA.
+* is set to &IWL_INVALID_STA.
*/
struct iwl_tof_range_req_ap_entry_v7 {
__le32 initiator_ap_flags;
@ -872,7 +876,7 @@ struct iwl_tof_range_req_ap_entry_v7 {
* @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
* the number of measurement iterations (min 2^0 = 1, max 2^14)
* @sta_id: the station id of the AP. Only relevant when associated to the AP,
-* otherwise should be set to &IWL_MVM_INVALID_STA.
+* otherwise should be set to &IWL_INVALID_STA.
* @cipher: pairwise cipher suite for secured measurement.
* &enum iwl_location_cipher.
* @hltk: HLTK to be used for secured 11az measurement
@ -885,10 +889,10 @@ struct iwl_tof_range_req_ap_entry_v7 {
* &IWL_INITIATOR_AP_FLAGS_TB is set.
* @rx_pn: the next expected PN for protected management frames Rx. LE byte
* order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
-* is set to &IWL_MVM_INVALID_STA.
+* is set to &IWL_INVALID_STA.
* @tx_pn: the next PN to use for protected management frames Tx. LE byte
* order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
-* is set to &IWL_MVM_INVALID_STA.
+* is set to &IWL_INVALID_STA.
* @r2i_ndp_params: parameters for R2I NDP ranging negotiation.
* bits 0 - 2: max LTF repetitions
* bits 3 - 5: max number of spatial streams
@ -946,7 +950,7 @@ struct iwl_tof_range_req_ap_entry_v8 {
* @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
* the number of measurement iterations (min 2^0 = 1, max 2^14)
* @sta_id: the station id of the AP. Only relevant when associated to the AP,
-* otherwise should be set to &IWL_MVM_INVALID_STA.
+* otherwise should be set to &IWL_INVALID_STA.
* @cipher: pairwise cipher suite for secured measurement.
* &enum iwl_location_cipher.
* @hltk: HLTK to be used for secured 11az measurement
@ -961,10 +965,10 @@ struct iwl_tof_range_req_ap_entry_v8 {
* &IWL_INITIATOR_AP_FLAGS_TB or &IWL_INITIATOR_AP_FLAGS_NON_TB is set.
* @rx_pn: the next expected PN for protected management frames Rx. LE byte
* order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
-* is set to &IWL_MVM_INVALID_STA.
+* is set to &IWL_INVALID_STA.
* @tx_pn: the next PN to use for protected management frames Tx. LE byte
* order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
-* is set to &IWL_MVM_INVALID_STA.
+* is set to &IWL_INVALID_STA.
* @r2i_ndp_params: parameters for R2I NDP ranging negotiation.
* bits 0 - 2: max LTF repetitions
* bits 3 - 5: max number of spatial streams
@ -1029,7 +1033,7 @@ struct iwl_tof_range_req_ap_entry_v9 {
* @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
* the number of measurement iterations (min 2^0 = 1, max 2^14)
* @sta_id: the station id of the AP. Only relevant when associated to the AP,
-* otherwise should be set to &IWL_MVM_INVALID_STA.
+* otherwise should be set to &IWL_INVALID_STA.
* @cipher: pairwise cipher suite for secured measurement.
* &enum iwl_location_cipher.
* @hltk: HLTK to be used for secured 11az measurement
@ -1042,10 +1046,10 @@ struct iwl_tof_range_req_ap_entry_v9 {
* &IWL_INITIATOR_AP_FLAGS_TB is set.
* @rx_pn: the next expected PN for protected management frames Rx. LE byte
* order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
-* is set to &IWL_MVM_INVALID_STA.
+* is set to &IWL_INVALID_STA.
* @tx_pn: the next PN to use for protected management frames Tx. LE byte
* order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
-* is set to &IWL_MVM_INVALID_STA.
+* is set to &IWL_INVALID_STA.
* @r2i_ndp_params: parameters for R2I NDP ranging negotiation.
* bits 0 - 2: max LTF repetitions
* bits 3 - 5: max number of spatial streams


@ -17,7 +17,7 @@
#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2)
#define IWL_STATION_COUNT_MAX 16
-#define IWL_MVM_INVALID_STA 0xFF
+#define IWL_INVALID_STA 0xFF
enum iwl_ac {
AC_BK,


@ -537,13 +537,18 @@ bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id)
/* cf. struct iwl_error_event_table */
u32 valid;
__le32 err_id;
-} err_info;
+} err_info = {};
+int ret;
if (!base)
return false;
-iwl_trans_read_mem_bytes(trans, base,
-&err_info, sizeof(err_info));
+ret = iwl_trans_read_mem_bytes(trans, base,
+&err_info, sizeof(err_info));
+if (ret)
+return true;
if (err_info.valid && err_id)
*err_id = le32_to_cpu(err_info.err_id);


@ -1074,12 +1074,13 @@ int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans);
#endif
-#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
-do { \
-if (__builtin_constant_p(bufsize)) \
-BUILD_BUG_ON((bufsize) % sizeof(u32)); \
-iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
-} while (0)
+#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
+({ \
+if (__builtin_constant_p(bufsize)) \
+BUILD_BUG_ON((bufsize) % sizeof(u32)); \
+iwl_trans_read_mem(trans, addr, buf, \
+(bufsize) / sizeof(u32)); \
+})
int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
u64 src_addr, u32 byte_cnt);
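The hunk above turns iwl_trans_read_mem_bytes() from a do { } while (0) wrapper into a GNU statement expression, so the macro now evaluates to the return value of iwl_trans_read_mem() and callers can check it (as the err_info hunk earlier starts doing). A stripped-down illustration with hypothetical names, not the iwlwifi definitions:

  /* a do/while macro discards the value of its last statement, so
   * "if (READ_OLD(dev, buf, len))" is a syntax error */
  #define READ_OLD(dev, buf, len) \
  do { \
          read_mem(dev, buf, len); \
  } while (0)

  /* a ({ ... }) statement expression yields the value of its last
   * statement, so the macro can be used like a function call:
   * if (READ_NEW(dev, buf, len)) return -EIO; */
  #define READ_NEW(dev, buf, len) \
  ({ \
          read_mem(dev, buf, len); \
  })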


@ -689,7 +689,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Rssi update while not associated - can happen since the statistics
* are handled asynchronously
*/
-if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA)
+if (mvmvif->deflink.ap_sta_id == IWL_INVALID_STA)
return;
/* No BT - reports should be disabled */


@ -19,6 +19,7 @@
#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5
#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH 15
#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED 11
+#define IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH -72
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)


@ -922,7 +922,7 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
static int
iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
struct cfg80211_wowlan *wowlan,
-struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+struct iwl_wowlan_config_cmd_v6 *wowlan_config_cmd,
struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
struct ieee80211_sta *ap_sta)
{
@ -948,7 +948,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
}
-iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
+if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 7)
+iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
if (wowlan->disconnect)
wowlan_config_cmd->wakeup_filter |=
@ -1122,7 +1123,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
static int
iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
struct cfg80211_wowlan *wowlan,
-struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+struct iwl_wowlan_config_cmd_v6 *wowlan_config_cmd_v6,
struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
struct iwl_mvm_vif_link_info *mvm_link,
struct ieee80211_sta *ap_sta)
@ -1131,7 +1132,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
-mvm->offload_tid = wowlan_config_cmd->offloading_tid;
+mvm->offload_tid = wowlan_config_cmd_v6->offloading_tid;
if (!unified_image) {
ret = iwl_mvm_switch_to_d3(mvm);
@ -1147,9 +1148,26 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (ret)
return ret;
-ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
-sizeof(*wowlan_config_cmd),
-wowlan_config_cmd);
+if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) > 6) {
+struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+.wakeup_filter = wowlan_config_cmd_v6->wakeup_filter,
+.wowlan_ba_teardown_tids =
+wowlan_config_cmd_v6->wowlan_ba_teardown_tids,
+.is_11n_connection =
+wowlan_config_cmd_v6->is_11n_connection,
+.offloading_tid = wowlan_config_cmd_v6->offloading_tid,
+.flags = wowlan_config_cmd_v6->flags,
+.sta_id = wowlan_config_cmd_v6->sta_id,
+};
+ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
+sizeof(wowlan_config_cmd),
+&wowlan_config_cmd);
+} else {
+ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
+sizeof(*wowlan_config_cmd_v6),
+wowlan_config_cmd_v6);
+}
if (ret)
return ret;
@ -1288,7 +1306,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
goto out_noreset;
}
-if (mvm_link->ap_sta_id == IWL_MVM_INVALID_STA) {
+if (mvm_link->ap_sta_id == IWL_INVALID_STA) {
/* if we're not associated, this must be netdetect */
if (!wowlan->nd_config) {
ret = 1;
@ -1302,7 +1320,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->net_detect = true;
} else {
-struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+struct iwl_wowlan_config_cmd_v6 wowlan_config_cmd = {
.offloading_tid = 0,
};
@ -1425,6 +1443,7 @@ struct iwl_wowlan_status_data {
u16 non_qos_seq_ctr;
u16 qos_seq_ctr[8];
u8 tid_tear_down;
+u8 tid_offloaded_tx;
struct {
/* including RX MIC key for TKIP */
@ -2474,7 +2493,64 @@ static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
struct iwl_wowlan_info_notif *data,
struct iwl_wowlan_status_data *status,
-u32 len, bool has_mlo_keys)
+u32 len)
{
u32 expected_len = sizeof(*data) +
data->num_mlo_link_keys * sizeof(status->mlo_keys[0]);
if (!data) {
IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
status = NULL;
return;
}
if (len < expected_len) {
IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
status = NULL;
return;
}
if (mvm->fast_resume)
return;
iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc);
iwl_mvm_convert_gtk_v3(status, data->gtk);
iwl_mvm_convert_igtk(status, &data->igtk[0]);
iwl_mvm_convert_bigtk(status, data->bigtk);
status->replay_ctr = le64_to_cpu(data->replay_ctr);
status->pattern_number = le16_to_cpu(data->pattern_number);
status->tid_offloaded_tx = data->tid_offloaded_tx;
if (IWL_FW_CHECK(mvm,
data->tid_offloaded_tx >=
ARRAY_SIZE(status->qos_seq_ctr),
"tid_offloaded_tx is out of bound %d\n",
data->tid_offloaded_tx))
data->tid_offloaded_tx = 0;
status->qos_seq_ctr[data->tid_offloaded_tx] =
le16_to_cpu(data->qos_seq_ctr);
status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons);
status->num_of_gtk_rekeys =
le32_to_cpu(data->num_of_gtk_rekeys);
status->received_beacons = le32_to_cpu(data->received_beacons);
status->tid_tear_down = data->tid_tear_down;
if (data->num_mlo_link_keys) {
status->num_mlo_keys = data->num_mlo_link_keys;
if (IWL_FW_CHECK(mvm,
status->num_mlo_keys > WOWLAN_MAX_MLO_KEYS,
"Too many mlo keys: %d, max %d\n",
status->num_mlo_keys, WOWLAN_MAX_MLO_KEYS))
status->num_mlo_keys = WOWLAN_MAX_MLO_KEYS;
memcpy(status->mlo_keys, data->mlo_gtks,
status->num_mlo_keys * sizeof(status->mlo_keys[0]));
}
}
static void
iwl_mvm_parse_wowlan_info_notif_v4(struct iwl_mvm *mvm,
struct iwl_wowlan_info_notif_v4 *data,
struct iwl_wowlan_status_data *status,
u32 len, bool has_mlo_keys)
{
u32 i;
u32 expected_len = sizeof(*data);
@ -2746,6 +2822,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
struct iwl_mvm_vif_link_info *mvm_link = mvmvif->link[link_id];
+int wowlan_info_ver = iwl_fw_lookup_notif_ver(mvm->fw,
+PROT_OFFLOAD_GROUP,
+WOWLAN_INFO_NOTIFICATION,
+IWL_FW_CMD_VER_UNKNOWN);
if (WARN_ON(!mvm_link))
goto out_unlock;
@ -2760,11 +2840,14 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
if (!mvm_ap_sta)
goto out_unlock;
-for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-u16 seq = status->qos_seq_ctr[i];
-/* firmware stores last-used value, we store next value */
-seq += 0x10;
-mvm_ap_sta->tid_data[i].seq_number = seq;
+/* firmware stores last-used value, we store next value */
+if (wowlan_info_ver >= 5) {
+mvm_ap_sta->tid_data[status->tid_offloaded_tx].seq_number =
+status->qos_seq_ctr[status->tid_offloaded_tx] + 0x10;
+} else {
+for (i = 0; i < IWL_MAX_TID_COUNT; i++)
+mvm_ap_sta->tid_data[i].seq_number =
+status->qos_seq_ctr[i] + 0x10;
+}
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
@ -3026,7 +3109,6 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
ieee80211_resume_disconnect(vif);
}
static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@ -3076,7 +3158,7 @@ iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
/* if FW uses status notification, status shouldn't be NULL here */
if (!d3_data->status) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA :
+u8 sta_id = mvm->net_detect ? IWL_INVALID_STA :
mvmvif->deflink.ap_sta_id;
/* bug - FW with MLO has status notification */
@ -3248,13 +3330,19 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
iwl_mvm_parse_wowlan_info_notif_v2(mvm, notif_v2,
d3_data->status,
len);
+} else if (wowlan_info_ver < 5) {
+struct iwl_wowlan_info_notif_v4 *notif =
+(void *)pkt->data;
+iwl_mvm_parse_wowlan_info_notif_v4(mvm, notif,
+d3_data->status, len,
+wowlan_info_ver > 3);
} else {
struct iwl_wowlan_info_notif *notif =
(void *)pkt->data;
iwl_mvm_parse_wowlan_info_notif(mvm, notif,
-d3_data->status, len,
-wowlan_info_ver > 3);
+d3_data->status, len);
}
d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO;
@ -3596,8 +3684,6 @@ void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
IWL_ERR(mvm,
"fast suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
-WARN_ON(iwl_mvm_power_update_mac(mvm));
ret = iwl_trans_d3_suspend(mvm->trans, false, false);
if (ret)
IWL_ERR(mvm, "fast suspend: trans_d3_suspend failed %d\n", ret);
@ -3619,22 +3705,31 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
if (iwl_mvm_check_rt_status(mvm, NULL)) {
IWL_ERR(mvm,
"iwl_mvm_check_rt_status failed, device is gone during suspend\n");
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
iwl_mvm_dump_nic_error_log(mvm);
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
false, 0);
-return -ENODEV;
+mvm->trans->state = IWL_TRANS_NO_FW;
+ret = -ENODEV;
+goto out;
}
ret = iwl_mvm_d3_notif_wait(mvm, &d3_data);
+if (ret) {
+IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret);
+mvm->trans->state = IWL_TRANS_NO_FW;
+}
+out:
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
mvm->fast_resume = false;
-if (ret)
-IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret);
return ret;
}


@ -221,7 +221,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
mvmvif->deflink.queue_params[i].uapsd);
if (vif->type == NL80211_IFTYPE_STATION &&
-ap_sta_id != IWL_MVM_INVALID_STA) {
+ap_sta_id != IWL_INVALID_STA) {
struct iwl_mvm_sta *mvm_sta;
mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
@ -463,11 +463,13 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
-size_t count, loff_t *ppos)
+static ssize_t
+iwl_dbgfs_low_latency_write_handle(struct wiphy *wiphy, struct file *file,
+char *buf, size_t count, void *data)
{
-struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-struct iwl_mvm *mvm = mvmvif->mvm;
+struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+struct ieee80211_vif *vif = data;
u8 value;
int ret;
@ -484,12 +486,28 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
return count;
}
static ssize_t
-iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf,
-size_t count, loff_t *ppos)
+static ssize_t iwl_dbgfs_low_latency_write(struct file *file,
+const char __user *user_buf,
+size_t count, loff_t *ppos)
+{
+struct ieee80211_vif *vif = file->private_data;
+struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+struct iwl_mvm *mvm = mvmvif->mvm;
+char buf[10] = {};
+return wiphy_locked_debugfs_write(mvm->hw->wiphy, file,
+buf, sizeof(buf), user_buf, count,
+iwl_dbgfs_low_latency_write_handle,
+vif);
+}
static ssize_t
+iwl_dbgfs_low_latency_force_write_handle(struct wiphy *wiphy, struct file *file,
+char *buf, size_t count, void *data)
{
+struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+struct ieee80211_vif *vif = data;
u8 value;
int ret;
@ -517,6 +535,22 @@ iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf,
return count;
}
+static ssize_t
+iwl_dbgfs_low_latency_force_write(struct file *file,
+const char __user *user_buf,
+size_t count, loff_t *ppos)
+{
+struct ieee80211_vif *vif = file->private_data;
+struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+struct iwl_mvm *mvm = mvmvif->mvm;
+char buf[10] = {};
+return wiphy_locked_debugfs_write(mvm->hw->wiphy, file,
+buf, sizeof(buf), user_buf, count,
+iwl_dbgfs_low_latency_force_write_handle,
+vif);
+}
static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@ -831,8 +865,20 @@ MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(low_latency_force, 10);
+static const struct file_operations iwl_dbgfs_low_latency_ops = {
+.write = iwl_dbgfs_low_latency_write,
+.read = iwl_dbgfs_low_latency_read,
+.open = simple_open,
+.llseek = generic_file_llseek,
+};
+static const struct file_operations iwl_dbgfs_low_latency_force_ops = {
+.write = iwl_dbgfs_low_latency_force_write,
+.open = simple_open,
+.llseek = generic_file_llseek,
+};
MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);


@ -559,12 +559,12 @@ static int iwl_mvm_ftm_set_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->ftm_unprotected) {
-*sta_id = IWL_MVM_INVALID_STA;
+*sta_id = IWL_INVALID_STA;
*flags &= ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF);
}
#endif
} else {
-*sta_id = IWL_MVM_INVALID_STA;
+*sta_id = IWL_INVALID_STA;
}
return 0;
@ -1063,6 +1063,8 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_FW_CMD_VER_UNKNOWN);
switch (cmd_ver) {
+case 15:
+/* Version 15 has the same struct as 14 */
case 14:
err = iwl_mvm_ftm_start_v14(mvm, vif, req);
break;


@ -131,7 +131,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
-if (cmd_ver == 10) {
+if (cmd_ver >= 10) {
cmd.band =
iwl_mvm_phy_band_from_nl80211(chandef->chan->band);
}


@ -1401,6 +1401,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
int ret, i;
struct ieee80211_supported_band *sband = NULL;
+lockdep_assert_wiphy(mvm->hw->wiphy);
lockdep_assert_held(&mvm->mutex);
ret = iwl_trans_start_hw(mvm->trans);
@ -1484,7 +1485,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
for (i = 0; i < IWL_FW_MAX_LINK_ID + 1; i++)
RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL);
-mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
+mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;
/* reset quota debouncing buffer - 0xff will yield invalid data */
memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
@ -1620,6 +1621,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
int ret, i;
+lockdep_assert_wiphy(mvm->hw->wiphy);
lockdep_assert_held(&mvm->mutex);
ret = iwl_trans_start_hw(mvm->trans);


@ -1167,3 +1167,14 @@ void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (!mvmvif->esr_disable_reason)
iwl_mvm_esr_unblocked(mvm, vif);
}
+void iwl_mvm_init_link(struct iwl_mvm_vif_link_info *link)
+{
+link->bcast_sta.sta_id = IWL_INVALID_STA;
+link->mcast_sta.sta_id = IWL_INVALID_STA;
+link->ap_sta_id = IWL_INVALID_STA;
+for (int r = 0; r < NUM_IWL_MVM_SMPS_REQ; r++)
+link->smps_requests[r] =
+IEEE80211_SMPS_AUTOMATIC;
+}


@ -216,7 +216,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
.preferred_tsf = NUM_TSF_IDS,
.found_vif = false,
};
-int ret, i;
+int ret;
lockdep_assert_held(&mvm->mutex);
@ -298,9 +298,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->time_event_data.id = TE_MAX;
mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
-mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
-mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
-mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA;
+iwl_mvm_init_link(&mvmvif->deflink);
/* No need to allocate data queues to P2P Device MAC and NAN.*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
@ -316,9 +314,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
}
-for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
-mvmvif->deflink.smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
return 0;
exit_fail:
@ -1696,6 +1691,9 @@ iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
ieee80211_beacon_loss(vif);
else
ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
+/* try to switch links, no-op if we don't have MLO */
+iwl_mvm_int_mlo_scan(mvm, vif);
}
iwl_dbg_tlv_time_point(&mvm->fwrt,


@ -1109,7 +1109,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
for_each_mvm_vif_valid_link(mvmvif, link_id) {
-mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA;
+mvmvif->link[link_id]->ap_sta_id = IWL_INVALID_STA;
mvmvif->link[link_id]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
mvmvif->link[link_id]->phy_ctxt = NULL;
mvmvif->link[link_id]->active = 0;
@ -1237,6 +1237,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
fast_resume = mvm->fast_resume;
if (fast_resume) {
+iwl_mvm_mei_device_state(mvm, true);
ret = iwl_mvm_fast_resume(mvm);
if (ret) {
iwl_mvm_stop_device(mvm);
@ -1379,10 +1380,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend)
iwl_mvm_rm_aux_sta(mvm);
if (suspend &&
-mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
iwl_mvm_fast_suspend(mvm);
-else
+/* From this point on, we won't touch the device */
+iwl_mvm_mei_device_state(mvm, false);
+} else {
iwl_mvm_stop_device(mvm);
+}
iwl_mvm_async_handlers_purge(mvm);
/* async_handlers_list is empty and will stay empty: HW is stopped */
@ -2951,7 +2955,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_MVM_SMPS_REQ_PROT,
IEEE80211_SMPS_DYNAMIC, 0);
}
-} else if (mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) {
+} else if (mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {
iwl_mvm_mei_host_disassociated(mvm);
/*
* If update fails - SF might be running in associated
@ -2987,7 +2991,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm,
"failed to remove AP station\n");
-mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA;
+mvmvif->deflink.ap_sta_id = IWL_INVALID_STA;
}
/* remove quota for this interface */
@ -3444,7 +3448,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
*/
break;
case STA_NOTIFY_AWAKE:
-if (WARN_ON(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
+if (WARN_ON(mvmsta->deflink.sta_id == IWL_INVALID_STA))
break;
if (txqs)
@ -3524,6 +3528,8 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
unsigned int link_id;
+lockdep_assert_wiphy(mvm->hw->wiphy);
/*
* This is called before mac80211 does RCU synchronisation,
* so here we already invalidate our internal RCU-protected

View File

@ -396,7 +396,7 @@ void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0);
if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION ||
-link->ap_sta_id == IWL_MVM_INVALID_STA))
+link->ap_sta_id == IWL_INVALID_STA))
return;
if (!sec_key_ver)


@ -824,7 +824,7 @@ static bool iwl_mvm_mld_vif_have_valid_ap_sta(struct iwl_mvm_vif *mvmvif)
int i;
for_each_mvm_vif_valid_link(mvmvif, i) {
-if (mvmvif->link[i]->ap_sta_id != IWL_MVM_INVALID_STA)
+if (mvmvif->link[i]->ap_sta_id != IWL_INVALID_STA)
return true;
}
@ -851,7 +851,7 @@ static void iwl_mvm_mld_vif_delete_all_stas(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to remove AP station\n");
-link->ap_sta_id = IWL_MVM_INVALID_STA;
+link->ap_sta_id = IWL_INVALID_STA;
}
}
@ -1169,8 +1169,6 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
int err, i;
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
-int r;
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
break;
@ -1182,14 +1180,8 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
goto free;
}
-new_link[i]->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
-new_link[i]->mcast_sta.sta_id = IWL_MVM_INVALID_STA;
-new_link[i]->ap_sta_id = IWL_MVM_INVALID_STA;
new_link[i]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
-for (r = 0; r < NUM_IWL_MVM_SMPS_REQ; r++)
-new_link[i]->smps_requests[r] =
-IEEE80211_SMPS_AUTOMATIC;
+iwl_mvm_init_link(new_link[i]);
}
mutex_lock(&mvm->mutex);


@ -146,7 +146,7 @@ int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm,
unsigned int wdg_timeout = _wdg_timeout ? *_wdg_timeout :
mvm->trans->trans_cfg->base_params->wd_timeout;
-if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
+if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA))
return -ENOSPC;
if (sta->type == STATION_TYPE_AUX)
@ -346,7 +346,7 @@ static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
-if (WARN_ON_ONCE(int_sta->sta_id == IWL_MVM_INVALID_STA))
+if (WARN_ON_ONCE(int_sta->sta_id == IWL_INVALID_STA))
return -EINVAL;
if (flush)
@ -521,6 +521,9 @@ void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
unsigned int link_id,
bool is_in_fw)
{
+lockdep_assert_wiphy(mvm->hw->wiphy);
+lockdep_assert_held(&mvm->mutex);
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
is_in_fw ? ERR_PTR(-EINVAL) : NULL);
RCU_INIT_POINTER(mvm->fw_id_to_link_sta[mvm_sta_link->sta_id], NULL);
@ -559,7 +562,10 @@ static int iwl_mvm_mld_alloc_sta_link(struct iwl_mvm *mvm,
u32 sta_id = iwl_mvm_find_free_sta_id(mvm,
ieee80211_vif_type_p2p(vif));
-if (sta_id == IWL_MVM_INVALID_STA)
+lockdep_assert_wiphy(mvm->hw->wiphy);
+lockdep_assert_held(&mvm->mutex);
+if (sta_id == IWL_INVALID_STA)
return -ENOSPC;
if (rcu_access_pointer(sta->link[link_id]) == &sta->deflink) {
@ -612,10 +618,10 @@ static void iwl_mvm_mld_set_ap_sta_id(struct ieee80211_sta *sta,
struct iwl_mvm_link_sta *sta_link)
{
if (!sta->tdls) {
-WARN_ON(vif_link->ap_sta_id != IWL_MVM_INVALID_STA);
+WARN_ON(vif_link->ap_sta_id != IWL_INVALID_STA);
vif_link->ap_sta_id = sta_link->sta_id;
} else {
-WARN_ON(vif_link->ap_sta_id == IWL_MVM_INVALID_STA);
+WARN_ON(vif_link->ap_sta_id == IWL_INVALID_STA);
}
}
@ -631,6 +637,9 @@ static int iwl_mvm_alloc_sta_after_restart(struct iwl_mvm *mvm,
int ret = -EINVAL;
int sta_id;
+lockdep_assert_wiphy(mvm->hw->wiphy);
+lockdep_assert_held(&mvm->mutex);
/* First add an empty station since allocating a queue requires
* a valid station. Since we need a link_id to allocate a station,
* pick up the first valid one.
@ -686,7 +695,7 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
spin_lock_init(&mvm_sta->lock);
-ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA,
+ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_INVALID_STA,
STATION_TYPE_PEER);
} else {
ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta);
@ -858,9 +867,10 @@ int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id)
{
int ret;
+lockdep_assert_wiphy(mvm->hw->wiphy);
lockdep_assert_held(&mvm->mutex);
-if (WARN_ON(sta_id == IWL_MVM_INVALID_STA))
+if (WARN_ON(sta_id == IWL_INVALID_STA))
return 0;
ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
@ -1064,6 +1074,7 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
unsigned int link_id;
int ret;
+lockdep_assert_wiphy(mvm->hw->wiphy);
lockdep_assert_held(&mvm->mutex);
for_each_set_bit(link_id, &old_links_long,
@ -1109,7 +1120,7 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
goto err;
if (vif->type == NL80211_IFTYPE_STATION)
-mvm_vif_link->ap_sta_id = IWL_MVM_INVALID_STA;
+mvm_vif_link->ap_sta_id = IWL_INVALID_STA;
iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id,
false);


@ -1082,6 +1082,7 @@ struct iwl_mvm {
/* data related to data path */
struct iwl_rx_phy_info last_phy_info;
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_STATION_COUNT_MAX];
+/* note: fw_id_to_link_sta must be protected by wiphy and mvm mutexes */
struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX];
u8 rx_ba_sessions;
@ -2104,6 +2105,7 @@ int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band);
/* Links */
+void iwl_mvm_init_link(struct iwl_mvm_vif_link_info *link);
int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@ -2331,7 +2333,7 @@ static inline int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
}
#endif
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
-struct iwl_wowlan_config_cmd *cmd);
+struct iwl_wowlan_config_cmd_v6 *cmd);
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,


@ -10,7 +10,7 @@
#include "mvm.h"
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
-struct iwl_wowlan_config_cmd *cmd)
+struct iwl_wowlan_config_cmd_v6 *cmd)
{
int i;


@ -1505,8 +1505,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->scan_cmd_size = scan_size;
/* invalidate ids to prevent accidental removal of sta_id 0 */
-mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
-mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
+mvm->aux_sta.sta_id = IWL_INVALID_STA;
+mvm->snif_sta.sta_id = IWL_INVALID_STA;
/* Set EBS as successful as long as not stated otherwise by the FW. */
mvm->last_ebs_successful = true;


@ -560,7 +560,8 @@ static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
struct iwl_mvm_vif_link_info *link_info,
struct ieee80211_bss_conf *bss_conf)
{
-struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(vif)->mvm;
+struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+struct iwl_mvm *mvm = mvmvif->mvm;
int thold = bss_conf->cqm_rssi_thold;
int hyst = bss_conf->cqm_rssi_hyst;
int last_event;
@ -625,6 +626,13 @@ static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
return;
+/* We're not in EMLSR and our signal is bad, try to switch link maybe */
+if (sig < IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH && !mvmvif->esr_active) {
+iwl_mvm_int_mlo_scan(mvm, vif);
+return;
+}
+/* We are in EMLSR, check if we need to exit */
exit_esr_thresh =
iwl_mvm_get_esr_rssi_thresh(mvm,
&bss_conf->chanreq.oper,


@ -3597,7 +3597,8 @@ static int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm,
IWL_DEBUG_SCAN(mvm, "Starting Internal MLO scan: n_channels=%zu\n",
n_channels);
-if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif) ||
+hweight16(vif->valid_links) == 1)
return -EINVAL;
size = struct_size(req, channels, n_channels);


@ -47,7 +47,7 @@ int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype)
lockdep_is_held(&mvm->mutex)))
return sta_id;
}
-return IWL_MVM_INVALID_STA;
+return IWL_INVALID_STA;
}
/* Calculate the ampdu density and max size */
@ -1216,7 +1216,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
* can be unshared and finding one (and only one) that can be
* reused.
* This function is also invoked as a sort of clean-up task,
-* in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
+* in which case @alloc_for_sta is IWL_INVALID_STA.
*
* Returns the queue number, or -ENOSPC.
*/
@ -1309,7 +1309,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
rcu_read_unlock();
-if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
+if (free_queue >= 0 && alloc_for_sta != IWL_INVALID_STA) {
ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
alloc_for_sta);
if (ret)
@ -1522,7 +1522,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
mutex_lock(&mvm->mutex);
-iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
+iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);
while (!list_empty(&mvm->add_stream_txqs)) {
struct iwl_mvm_txq *mvmtxq;
@ -1580,7 +1580,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
return 0;
/* run the general cleanup/unsharing of queues */
-iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
+iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);
/* Make sure we have free resources for this STA */
if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
@ -1756,7 +1756,7 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* this function
*/
if (!mvm->mld_api_is_used) {
-if (WARN_ON(sta_id == IWL_MVM_INVALID_STA))
+if (WARN_ON(sta_id == IWL_INVALID_STA))
return -EINVAL;
mvm_sta->deflink.sta_id = sta_id;
@ -1865,7 +1865,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
else
sta_id = mvm_sta->deflink.sta_id;
-if (sta_id == IWL_MVM_INVALID_STA)
+if (sta_id == IWL_INVALID_STA)
return -ENOSPC;
spin_lock_init(&mvm_sta->lock);
@ -1903,10 +1903,10 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION) {
if (!sta->tdls) {
-WARN_ON(mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA);
+WARN_ON(mvmvif->deflink.ap_sta_id != IWL_INVALID_STA);
mvmvif->deflink.ap_sta_id = sta_id;
} else {
-WARN_ON(mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA);
+WARN_ON(mvmvif->deflink.ap_sta_id == IWL_INVALID_STA);
}
}
@ -2095,7 +2095,7 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0);
/* unassoc - go ahead - remove the AP STA now */
-mvm_link->ap_sta_id = IWL_MVM_INVALID_STA;
+mvm_link->ap_sta_id = IWL_INVALID_STA;
}
/*
@ -2103,7 +2103,7 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* before the STA is removed.
*/
if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;
cancel_delayed_work(&mvm->tdls_cs.dwork);
}
@ -2170,9 +2170,9 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
u8 type)
{
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
-sta->sta_id == IWL_MVM_INVALID_STA) {
+sta->sta_id == IWL_INVALID_STA) {
sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
-if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
+if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA))
return -ENOSPC;
}
@ -2188,7 +2188,7 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
-sta->sta_id = IWL_MVM_INVALID_STA;
+sta->sta_id = IWL_INVALID_STA;
}
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
@ -2306,7 +2306,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
-if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
+if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_INVALID_STA))
return -EINVAL;
iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id,
@ -2324,7 +2324,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
-if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
+if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_INVALID_STA))
return -EINVAL;
iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id,
@ -2389,7 +2389,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (vif->type == NL80211_IFTYPE_ADHOC)
baddr = vif->bss_conf.bssid;
-if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
+if (WARN_ON_ONCE(bsta->sta_id == IWL_INVALID_STA))
return -ENOSPC;
ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
@ -2644,7 +2644,7 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
u32 status;
/* This is a valid situation for GTK removal */
-if (sta_id == IWL_MVM_INVALID_STA)
+if (sta_id == IWL_INVALID_STA)
return 0;
key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
@ -3514,7 +3514,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
* station ID, then use AP's station ID.
*/
if (vif->type == NL80211_IFTYPE_STATION &&
-mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) {
+mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {
u8 sta_id = mvmvif->deflink.ap_sta_id;
sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
@ -3569,7 +3569,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
new_api ? 2 : 1);
-if (sta_id == IWL_MVM_INVALID_STA)
+if (sta_id == IWL_INVALID_STA)
return -EINVAL;
keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
@ -3728,7 +3728,7 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
if (remove_key) {
/* This is a valid situation for IGTK */
-if (sta_id == IWL_MVM_INVALID_STA)
+if (sta_id == IWL_INVALID_STA)
return 0;
igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
@ -3795,7 +3795,7 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
return sta->addr;
if (vif->type == NL80211_IFTYPE_STATION &&
-mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) {
+mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {
u8 sta_id = mvmvif->deflink.ap_sta_id;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
@ -3865,7 +3865,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
{
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
struct iwl_mvm_sta *mvm_sta;
-u8 sta_id = IWL_MVM_INVALID_STA;
+u8 sta_id = IWL_INVALID_STA;
int ret;
static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
@ -3966,7 +3966,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
{
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
struct iwl_mvm_sta *mvm_sta;
-u8 sta_id = IWL_MVM_INVALID_STA;
+u8 sta_id = IWL_INVALID_STA;
int ret, i;
lockdep_assert_held(&mvm->mutex);
@ -4273,7 +4273,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
return;
/* Need to block/unblock also multicast station */
-if (mvmvif->deflink.mcast_sta.sta_id != IWL_MVM_INVALID_STA)
+if (mvmvif->deflink.mcast_sta.sta_id != IWL_INVALID_STA)
iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
&mvmvif->deflink.mcast_sta,
disable);
@ -4282,7 +4282,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
* Only unblock the broadcast station (FW blocks it for immediate
* quiet, not the driver)
*/
-if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_MVM_INVALID_STA)
+if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_INVALID_STA)
iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
&mvmvif->deflink.bcast_sta,
disable);


@ -196,7 +196,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm);
if (state == IWL_MVM_TDLS_SW_IDLE)
-mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
+mvm->tdls_cs.cur_sta_id = IWL_INVALID_STA;
}
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
@ -250,7 +250,7 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
/* get the existing peer if it's there */
if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
-mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
+mvm->tdls_cs.cur_sta_id != IWL_INVALID_STA) {
struct ieee80211_sta *sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
lockdep_is_held(&mvm->mutex));
@ -465,7 +465,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
/* station might be gone, in that case do nothing */
-if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
+if (mvm->tdls_cs.peer.sta_id == IWL_INVALID_STA)
return;
sta = rcu_dereference_protected(
@ -512,7 +512,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
sta->addr, chandef->chan->center_freq, chandef->width);
/* we only support a single peer for channel switching */
-if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
+if (mvm->tdls_cs.peer.sta_id != IWL_INVALID_STA) {
IWL_DEBUG_TDLS(mvm,
"Existing peer. Can't start switch with %pM\n",
sta->addr);
@ -566,7 +566,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
/* we only support a single peer for channel switching */
-if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
+if (mvm->tdls_cs.peer.sta_id == IWL_INVALID_STA) {
IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
goto out;
}
@ -587,7 +587,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
wait_for_phy = true;
-mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
+mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;
dev_kfree_skb(mvm->tdls_cs.peer.skb);
mvm->tdls_cs.peer.skb = NULL;
@ -630,7 +630,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
params->status != 0 &&
mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
-mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
+mvm->tdls_cs.cur_sta_id != IWL_INVALID_STA) {
struct ieee80211_sta *cur_sta;
/* make sure it's the same peer */


@ -1213,7 +1213,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
return -1;
-if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
+if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_INVALID_STA))
return -1;
if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he)
@ -1357,7 +1357,7 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
mvmsta = iwl_mvm_sta_from_mac80211(sta);
-if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
+if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_INVALID_STA))
return -1;
memcpy(&info, skb->cb, sizeof(info));


@ -261,7 +261,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
.data = { lq, },
};
-if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
+if (WARN_ON(lq->sta_id == IWL_INVALID_STA ||
iwl_mvm_has_tlc_offload(mvm)))
return -EINVAL;
@ -679,10 +679,8 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bss_iface_iterator, &bss_iter_data);
-if (bss_iter_data.error) {
-IWL_ERR(mvm, "More than one managed interface active!\n");
+if (bss_iter_data.error)
return ERR_PTR(-EINVAL);
-}
return bss_iter_data.vif;
}


@ -1643,6 +1643,8 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
out:
if (*status == IWL_D3_STATUS_ALIVE)
ret = iwl_pcie_d3_handshake(trans, false);
+else
+trans->state = IWL_TRANS_NO_FW;
return ret;
}


@ -71,7 +71,7 @@ MODULE_FIRMWARE(MT7628_FIRMWARE_E2);
struct platform_driver mt76_wmac_driver = {
.probe = mt76_wmac_probe,
-.remove_new = mt76_wmac_remove,
+.remove = mt76_wmac_remove,
.driver = {
.name = "mt76_wmac",
.of_match_table = of_wmac_match,


@ -63,7 +63,7 @@ struct platform_driver mt7622_wmac_driver = {
.of_match_table = mt7622_wmac_of_match,
},
.probe = mt7622_wmac_probe,
-.remove_new = mt7622_wmac_remove,
+.remove = mt7622_wmac_remove,
};
MODULE_FIRMWARE(MT7622_FIRMWARE_N9);


@ -1303,7 +1303,7 @@ struct platform_driver mt798x_wmac_driver = {
.of_match_table = mt798x_wmac_of_match,
},
.probe = mt798x_wmac_probe,
-.remove_new = mt798x_wmac_remove,
+.remove = mt798x_wmac_remove,
};
MODULE_FIRMWARE(MT7986_FIRMWARE_WA);
