wifi: ath12k: add partner device buffer support in receive data path

Currently, partner device buffer is not handled in the receive data path. In
Multi-Link Operation, the partner device buffer is reported to the primary
upper MAC rings. Therefore, add partner device buffer processing in the REO and
Exception ring handler.

Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.3.1-00173-QCAHKSWPL_SILICONZ-1
Tested-on: WCN7850 hw2.0 PCI WLAN.HMT.1.0.c5-00481-QCAHMTSWPL_V1.0_V2.0_SILICONZ-3

Signed-off-by: Karthikeyan Periyasamy <quic_periyasa@quicinc.com>
Signed-off-by: Kalle Valo <quic_kvalo@quicinc.com>
Link: https://patch.msgid.link/20241209185421.376381-3-kvalo@kernel.org
Signed-off-by: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
This commit is contained in:
Karthikeyan Periyasamy 2024-12-09 20:54:14 +02:00 committed by Jeff Johnson
parent 1a73acb5fb
commit 1d18b197bc
4 changed files with 140 additions and 49 deletions

View File

@ -1229,4 +1229,10 @@ static inline void ath12k_core_stopped(struct ath12k_base *ab)
ab->ag->num_started--;
}
static inline struct ath12k_base *ath12k_ag_to_ab(struct ath12k_hw_group *ag,
u8 device_id)
{
return ag->ab[device_id];
}
#endif /* _CORE_H_ */

View File

@ -1445,6 +1445,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
rx_descs[j].device_id = ab->device_id;
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
/* Update descriptor VA in SPT */

View File

@ -287,7 +287,8 @@ struct ath12k_rx_desc_info {
u32 cookie;
u32 magic;
u8 in_use : 1,
reserved : 7;
device_id : 3,
reserved : 4;
};
struct ath12k_tx_desc_info {

View File

@ -2601,6 +2601,7 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
struct sk_buff *msdu;
struct ath12k *ar;
struct ath12k_hw_link *hw_links = ag->hw_links;
struct ath12k_base *partner_ab;
u8 hw_link_id, pdev_id;
int ret;
@ -2612,11 +2613,12 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
while ((msdu = __skb_dequeue(msdu_list))) {
rxcb = ATH12K_SKB_RXCB(msdu);
hw_link_id = rxcb->hw_link_id;
pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params,
partner_ab = ath12k_ag_to_ab(ag,
hw_links[hw_link_id].device_id);
pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
hw_links[hw_link_id].pdev_idx);
ar = ab->pdevs[pdev_id].ar;
if (!rcu_dereference(ab->pdevs_active[pdev_id])) {
ar = partner_ab->pdevs[pdev_id].ar;
if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) {
dev_kfree_skb_any(msdu);
continue;
}
@ -2666,23 +2668,29 @@ static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
LIST_HEAD(rx_desc_used_list);
struct ath12k_hw_group *ag = ab->ag;
struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
struct ath12k_hw_link *hw_links = ag->hw_links;
int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
struct ath12k_rx_desc_info *desc_info;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
struct hal_reo_dest_ring *desc;
int num_buffs_reaped = 0;
struct ath12k_base *partner_ab;
struct sk_buff_head msdu_list;
struct ath12k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
u8 hw_link_id, device_id;
struct hal_srng *srng;
struct sk_buff *msdu;
bool done = false;
u8 hw_link_id;
u64 desc_va;
__skb_queue_head_init(&msdu_list);
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
spin_lock_bh(&srng->lock);
@ -2706,11 +2714,22 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
le32_to_cpu(desc->buf_va_lo));
desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
device_id = hw_links[hw_link_id].device_id;
partner_ab = ath12k_ag_to_ab(ag, device_id);
if (unlikely(!partner_ab)) {
if (desc_info->skb) {
dev_kfree_skb_any(desc_info->skb);
desc_info->skb = NULL;
}
continue;
}
/* retry manual desc retrieval */
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, cookie);
desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie);
if (!desc_info) {
ath12k_warn(ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
cookie);
continue;
}
@ -2722,14 +2741,14 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
msdu = desc_info->skb;
desc_info->skb = NULL;
list_add_tail(&desc_info->list, &rx_desc_used_list);
list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ab->dev, rxcb->paddr,
dma_unmap_single(partner_ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
num_buffs_reaped++;
num_buffs_reaped[device_id]++;
push_reason = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
@ -2786,8 +2805,17 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
if (!total_msdu_reaped)
goto exit;
ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
num_buffs_reaped);
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
partner_ab = ath12k_ag_to_ab(ag, device_id);
rx_ring = &partner_ab->dp.rx_refill_buf_ring;
ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
&rx_desc_used_list[device_id],
num_buffs_reaped[device_id]);
}
ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
ring_id);
@ -3478,7 +3506,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
int budget)
{
struct ath12k_hw_group *ag = ab->ag;
struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
struct hal_rx_msdu_link *link_desc_va;
@ -3487,11 +3517,10 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
struct dp_rxdma_ring *rx_ring;
struct dp_srng *reo_except;
struct ath12k_hw_link *hw_links = ag->hw_links;
LIST_HEAD(rx_desc_used_list);
struct ath12k_base *partner_ab;
u8 hw_link_id, device_id;
u32 desc_bank, num_msdus;
struct hal_srng *srng;
struct ath12k_dp *dp;
u8 hw_link_id;
struct ath12k *ar;
dma_addr_t paddr;
bool is_frag;
@ -3501,9 +3530,10 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
tot_n_bufs_reaped = 0;
quota = budget;
dp = &ab->dp;
reo_except = &dp->reo_except_ring;
link_desc_banks = dp->link_desc_banks;
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
reo_except = &ab->dp.reo_except_ring;
srng = &ab->hal.srng_list[reo_except->ring_id];
@ -3526,21 +3556,24 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
hw_link_id = le32_get_bits(reo_desc->info0,
HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
device_id = hw_links[hw_link_id].device_id;
partner_ab = ath12k_ag_to_ab(ag, device_id);
pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params,
pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
hw_links[hw_link_id].pdev_idx);
ar = ab->pdevs[pdev_id].ar;
ar = partner_ab->pdevs[pdev_id].ar;
link_desc_banks = partner_ab->dp.link_desc_banks;
link_desc_va = link_desc_banks[desc_bank].vaddr +
(paddr - link_desc_banks[desc_bank].paddr);
ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
if (rbm != dp->idle_link_rbm &&
if (rbm != partner_ab->dp.idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
rbm != ab->hw_params->hal_params->rx_buf_rbm) {
rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
ath12k_dp_rx_link_desc_return(ab, reo_desc,
ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
HAL_WBM_REL_BM_ACT_REL_MSDU);
continue;
}
@ -3550,20 +3583,26 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
/* Process only rx fragments with one msdu per link desc below, and drop
* msdu's indicated due to error reasons.
* Dynamic fragmentation not supported in Multi-link client, so drop the
* partner device buffers.
*/
if (!is_frag || num_msdus > 1) {
if (!is_frag || num_msdus > 1 ||
partner_ab->device_id != ab->device_id) {
drop = true;
/* Return the link desc back to wbm idle list */
ath12k_dp_rx_link_desc_return(ab, reo_desc,
ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
for (i = 0; i < num_msdus; i++) {
if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
&rx_desc_used_list,
&rx_desc_used_list[device_id],
drop,
msdu_cookies[i]))
msdu_cookies[i])) {
num_buffs_reaped[device_id]++;
tot_n_bufs_reaped++;
}
}
if (tot_n_bufs_reaped >= quota) {
@ -3579,10 +3618,17 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
spin_unlock_bh(&srng->lock);
rx_ring = &dp->rx_refill_buf_ring;
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
tot_n_bufs_reaped);
partner_ab = ath12k_ag_to_ab(ag, device_id);
rx_ring = &partner_ab->dp.rx_refill_buf_ring;
ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
&rx_desc_used_list[device_id],
num_buffs_reaped[device_id]);
}
return tot_n_bufs_reaped;
}
@ -3799,7 +3845,8 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct napi_struct *napi, int budget)
{
LIST_HEAD(rx_desc_used_list);
struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
@ -3809,18 +3856,22 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct sk_buff_head msdu_list, scatter_msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
u8 hw_link_id;
int num_buffs_reaped = 0;
int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
int total_num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
struct ath12k_hw_link *hw_links = ab->ag->hw_links;
struct ath12k_hw_link *hw_links = ag->hw_links;
struct ath12k_base *partner_ab;
u8 hw_link_id, device_id;
int ret, pdev_id;
struct hal_rx_desc *msdu_data;
__skb_queue_head_init(&msdu_list);
__skb_queue_head_init(&scatter_msdu_list);
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
rx_ring = &dp->rx_refill_buf_ring;
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
@ -3856,14 +3907,27 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
msdu = desc_info->skb;
desc_info->skb = NULL;
list_add_tail(&desc_info->list, &rx_desc_used_list);
device_id = desc_info->device_id;
partner_ab = ath12k_ag_to_ab(ag, device_id);
if (unlikely(!partner_ab)) {
dev_kfree_skb_any(msdu);
/* In any case continuation bit is set
* in the previous record, cleanup scatter_msdu_list
*/
ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
continue;
}
list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ab->dev, rxcb->paddr,
dma_unmap_single(partner_ab->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
num_buffs_reaped++;
num_buffs_reaped[device_id]++;
total_num_buffs_reaped++;
if (!err_info.continuation)
budget--;
@ -3887,9 +3951,9 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
continue;
}
hw_link_id = ath12k_dp_rx_get_msdu_src_link(ab,
hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab,
msdu_data);
if (hw_link_id >= MAX_RADIOS) {
if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
dev_kfree_skb_any(msdu);
/* In any case continuation bit is set
@ -3925,20 +3989,39 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
spin_unlock_bh(&srng->lock);
if (!num_buffs_reaped)
if (!total_num_buffs_reaped)
goto done;
ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
num_buffs_reaped);
for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
partner_ab = ath12k_ag_to_ab(ag, device_id);
rx_ring = &partner_ab->dp.rx_refill_buf_ring;
ath12k_dp_rx_bufs_replenish(ab, rx_ring,
&rx_desc_used_list[device_id],
num_buffs_reaped[device_id]);
}
rcu_read_lock();
while ((msdu = __skb_dequeue(&msdu_list))) {
rxcb = ATH12K_SKB_RXCB(msdu);
hw_link_id = rxcb->hw_link_id;
pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params,
device_id = hw_links[hw_link_id].device_id;
partner_ab = ath12k_ag_to_ab(ag, device_id);
if (unlikely(!partner_ab)) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
"Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
hw_link_id, device_id);
dev_kfree_skb_any(msdu);
continue;
}
pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
hw_links[hw_link_id].pdev_idx);
ar = ab->pdevs[pdev_id].ar;
ar = partner_ab->pdevs[pdev_id].ar;
if (!ar || !rcu_dereference(ar->ab->pdevs_active[hw_link_id])) {
dev_kfree_skb_any(msdu);
@ -3953,7 +4036,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
}
rcu_read_unlock();
done:
return num_buffs_reaped;
return total_num_buffs_reaped;
}
void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)