Diffstat (limited to 'drivers/net/wireless/ath/ath12k/dp_mon.c')
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_mon.c  1097
1 file changed, 1042 insertions(+), 55 deletions(-)
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index d22800e89485..28cadc4167f7 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1416,6 +1416,40 @@ ath12k_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_
}
}
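+/* Translate the RX_MSDU_END INFO13 error bits reported by hardware
+ * into the driver's HAL_RX_MPDU_ERR_* bitmap so that FCS, decrypt,
+ * TKIP MIC, A-MSDU, overflow and length errors can all be acted on
+ * through a single field.
+ */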
+static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap)
+{
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ *errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+}
+
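+/* Cache the error bitmap and decap format parsed from the MSDU_END
+ * TLV in the per-pdev monitor data; they are copied into the MPDU
+ * under assembly when the MSDU_END status is handled.
+ */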
+static void
+ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon,
+ const struct hal_rx_msdu_end *msdu_end)
+{
+ ath12k_dp_mon_parse_rx_msdu_end_err(__le32_to_cpu(msdu_end->info2),
+ &pmon->err_bitmap);
+ pmon->decap_format = le32_get_bits(msdu_end->info1,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
static enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
struct ath12k_mon_data *pmon,
@@ -1647,7 +1681,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID);
}
- break;
+ return HAL_RX_MON_STATUS_MPDU_START;
}
case HAL_RX_MSDU_START:
/* TODO: add msdu start parsing logic */
@@ -1655,6 +1689,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
case HAL_MON_BUF_ADDR:
return HAL_RX_MON_STATUS_BUF_ADDR;
case HAL_RX_MSDU_END:
+ ath12k_dp_mon_parse_status_msdu_end(pmon, tlv_data);
return HAL_RX_MON_STATUS_MSDU_END;
case HAL_RX_MPDU_END:
return HAL_RX_MON_STATUS_MPDU_END;
@@ -1688,45 +1723,295 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
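+/* Seed the mac80211 RX status with defaults and derive the NL80211
+ * band from the PPDU center frequency; NUM_NL80211_BANDS serves as a
+ * sentinel for frequencies outside the 2/5/6 GHz ranges.
+ */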
+static void
+ath12k_dp_mon_fill_rx_stats_info(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ u32 center_freq = ppdu_info->freq;
+
+ rx_status->freq = center_freq;
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(ppdu_info->bw);
+ rx_status->nss = ppdu_info->nss;
+ rx_status->rate_idx = 0;
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
+ center_freq <= ATH12K_MAX_6GHZ_FREQ) {
+ rx_status->band = NL80211_BAND_6GHZ;
+ } else if (center_freq >= ATH12K_MIN_2GHZ_FREQ &&
+ center_freq <= ATH12K_MAX_2GHZ_FREQ) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (center_freq >= ATH12K_MIN_5GHZ_FREQ &&
+ center_freq <= ATH12K_MAX_5GHZ_FREQ) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+ rx_status->band = NUM_NL80211_BANDS;
+ }
+}
+
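+/* Allocate a monitor status buffer, align its data pointer to
+ * RX_MON_STATUS_BUF_ALIGN, DMA-map it and track it in the ring's IDR;
+ * each failure path unwinds the steps already taken.
+ */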
+static struct sk_buff
+*ath12k_dp_rx_alloc_mon_status_buf(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring,
+ int *buf_id)
+{
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE);
+
+ if (!skb)
+ goto fail_alloc_skb;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ RX_MON_STATUS_BUF_ALIGN)) {
+ skb_pull(skb, PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, paddr)))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (*buf_id < 0)
+ goto fail_dma_unmap;
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+ return skb;
+
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+fail_alloc_skb:
+ return NULL;
+}
+
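+/* Peek at the next status ring entry and report whether its buffer
+ * carries the HAL_RX_STATUS_BUFFER_DONE tag, i.e. whether hardware
+ * has finished writing it and the entry can be replenished.
+ */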
+static enum dp_mon_status_buf_state
+ath12k_dp_rx_mon_buf_done(struct ath12k_base *ab, struct hal_srng *srng,
+ struct dp_rxdma_mon_ring *rx_ring)
+{
+ struct ath12k_skb_rxcb *rxcb;
+ struct hal_tlv_64_hdr *tlv;
+ struct sk_buff *skb;
+ void *status_desc;
+ dma_addr_t paddr;
+ u32 cookie;
+ int buf_id;
+ u8 rbm;
+
+ status_desc = ath12k_hal_srng_src_next_peek(ab, srng);
+ if (!status_desc)
+ return DP_MON_STATUS_NO_DMA;
+
+ ath12k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
+
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb)
+ return DP_MON_STATUS_NO_DMA;
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_64_hdr *)skb->data;
+ if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) != HAL_RX_STATUS_BUFFER_DONE)
+ return DP_MON_STATUS_NO_DMA;
+
+ return DP_MON_STATUS_REPLINISH;
+}
+
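+/* Compare a destination ring MSDU PPDU ID against the tracked status
+ * ring PPDU ID, using DP_NOT_PPDU_ID_WRAP_AROUND to tell a genuine
+ * advance from a counter wraparound. Returns the new PPDU ID when the
+ * tracked value is updated, 0 otherwise.
+ */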
+static u32 ath12k_dp_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id)
+{
+ u32 ret = 0;
+
+ if ((*ppdu_id < msdu_ppdu_id) &&
+ ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ /* Hold the mon dest ring and keep reaping the mon status ring. */
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ } else if ((*ppdu_id > msdu_ppdu_id) &&
+ ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ /* PPDU ID has exceeded the maximum value and will
+ * restart from 0.
+ */
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ }
+ return ret;
+}
+
+static
+void ath12k_dp_mon_next_link_desc_get(struct hal_rx_msdu_link *msdu_link,
+ dma_addr_t *paddr, u32 *sw_cookie, u8 *rbm,
+ struct ath12k_buffer_addr **pp_buf_addr_info)
+{
+ struct ath12k_buffer_addr *buf_addr_info;
+
+ buf_addr_info = &msdu_link->buf_addr_info;
+
+ ath12k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
+
+ *pp_buf_addr_info = buf_addr_info;
+}
+
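+/* Convert the parsed preamble type, MCS, NSS and GI into mac80211
+ * encoding and rate fields. Legacy rates are mapped through the
+ * band's sband table, HT folds the spatial streams into the index as
+ * rate_idx = mcs + 8 * (nss - 1), and VHT/HE/EHT report the MCS
+ * directly; an MCS above the per-mode maximum only triggers a warning.
+ */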
+static void
+ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_supported_band *sband;
+ enum rx_msdu_start_pkt_type pkt_type;
+ u8 rate_mcs, nss, sgi;
+ bool is_cck;
+
+ pkt_type = ppdu_info->preamble_type;
+ rate_mcs = ppdu_info->rate;
+ nss = ppdu_info->nss;
+ sgi = ppdu_info->gi;
+
+ switch (pkt_type) {
+ case RX_MSDU_START_PKT_TYPE_11A:
+ case RX_MSDU_START_PKT_TYPE_11B:
+ is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
+ if (rx_status->band < NUM_NL80211_BANDS) {
+ sband = &ar->mac.sbands[rx_status->band];
+ rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
+ is_cck);
+ }
+ break;
+ case RX_MSDU_START_PKT_TYPE_11N:
+ rx_status->encoding = RX_ENC_HT;
+ if (rate_mcs > ATH12K_HT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in HT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AC:
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in VHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AX:
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in HE mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11BE:
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_EHT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in EHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->encoding = RX_ENC_EHT;
+ rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+ break;
+ default:
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "monitor receives invalid preamble type %d",
+ pkt_type);
+ break;
+ }
+}
+
static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar,
struct sk_buff *head_msdu,
struct sk_buff *tail_msdu)
{
- u32 rx_pkt_offset, l2_hdr_offset;
+ u32 rx_pkt_offset, l2_hdr_offset, total_offset;
rx_pkt_offset = ar->ab->hal.hal_desc_sz;
l2_hdr_offset =
ath12k_dp_rx_h_l3pad(ar->ab, (struct hal_rx_desc *)tail_msdu->data);
- skb_pull(head_msdu, rx_pkt_offset + l2_hdr_offset);
+
+ if (ar->ab->hw_params->rxdma1_enable)
+ total_offset = ATH12K_MON_RX_PKT_OFFSET;
+ else
+ total_offset = rx_pkt_offset + l2_hdr_offset;
+
+ skb_pull(head_msdu, total_offset);
}
static struct sk_buff *
ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
- struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
- struct ieee80211_rx_status *rxs, bool *fcs_err)
+ struct dp_mon_mpdu *mon_mpdu,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct ieee80211_rx_status *rxs)
{
struct ath12k_base *ab = ar->ab;
struct sk_buff *msdu, *mpdu_buf, *prev_buf, *head_frag_list;
- struct hal_rx_desc *rx_desc, *tail_rx_desc;
- u8 *hdr_desc, *dest, decap_format;
+ struct sk_buff *head_msdu, *tail_msdu;
+ struct hal_rx_desc *rx_desc;
+ u8 *hdr_desc, *dest, decap_format = mon_mpdu->decap_format;
struct ieee80211_hdr_3addr *wh;
- u32 err_bitmap, frag_list_sum_len = 0;
+ struct ieee80211_channel *channel;
+ u32 frag_list_sum_len = 0;
+ u8 channel_num = ppdu_info->chan_num;
mpdu_buf = NULL;
+ head_msdu = mon_mpdu->head;
+ tail_msdu = mon_mpdu->tail;
- if (!head_msdu)
+ if (!head_msdu || !tail_msdu)
goto err_merge_fail;
- rx_desc = (struct hal_rx_desc *)head_msdu->data;
- tail_rx_desc = (struct hal_rx_desc *)tail_msdu->data;
+ ath12k_dp_mon_fill_rx_stats_info(ar, ppdu_info, rxs);
+
+ if (unlikely(rxs->band == NUM_NL80211_BANDS ||
+ !ath12k_ar_to_hw(ar)->wiphy->bands[rxs->band])) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
+ rxs->band, channel_num, ppdu_info->freq, ar->pdev_idx);
- err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, tail_rx_desc);
- if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
- *fcs_err = true;
+ spin_lock_bh(&ar->data_lock);
+ channel = ar->rx_channel;
+ if (channel) {
+ rxs->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
- decap_format = ath12k_dp_rx_h_decap_type(ab, tail_rx_desc);
+ if (rxs->band < NUM_NL80211_BANDS)
+ rxs->freq = ieee80211_channel_to_frequency(channel_num,
+ rxs->band);
- ath12k_dp_rx_h_ppdu(ar, tail_rx_desc, rxs);
+ ath12k_dp_mon_fill_rx_rate(ar, ppdu_info, rxs);
if (decap_format == DP_RX_DECAP_TYPE_RAW) {
ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
@@ -1736,7 +2021,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
head_frag_list = NULL;
while (msdu) {
- ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu);
+ ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
if (!head_frag_list)
head_frag_list = msdu;
@@ -1748,7 +2033,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
prev_buf->next = NULL;
- skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+ skb_trim(prev_buf, prev_buf->len);
if (head_frag_list) {
skb_shinfo(head_msdu)->frag_list = head_frag_list;
head_msdu->data_len = frag_list_sum_len;
@@ -1771,7 +2056,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
msdu = head_msdu;
while (msdu) {
- ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu);
+ ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
@@ -1954,7 +2239,8 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ u8 decap)
{
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
@@ -1966,10 +2252,12 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
struct ieee80211_sta *pubsta = NULL;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u8 decap = DP_RX_DECAP_TYPE_RAW;
+ struct ath12k_dp_rx_info rx_info;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol_tkip = rxcb->is_eapol;
+ status->link_valid = 0;
+
if ((status->encoding == RX_ENC_HE) && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
!(status->flag & RX_FLAG_SKIP_MONITOR)) {
he = skb_push(msdu, sizeof(known));
@@ -1977,10 +2265,9 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
status->flag |= RX_FLAG_RADIOTAP_HE;
}
- if (!(status->flag & RX_FLAG_ONLY_MONITOR))
- decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc);
spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+ rx_info.addr2_present = false;
+ peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, &rx_info);
if (peer && peer->sta) {
pubsta = peer->sta;
if (pubsta->valid_links) {
@@ -2035,25 +2322,23 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
}
static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
- struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
+ struct dp_mon_mpdu *mon_mpdu,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct napi_struct *napi)
{
struct ath12k_pdev_dp *dp = &ar->dp;
struct sk_buff *mon_skb, *skb_next, *header;
struct ieee80211_rx_status *rxs = &dp->rx_status;
- bool fcs_err = false;
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
- mon_skb = ath12k_dp_mon_rx_merg_msdus(ar,
- head_msdu, tail_msdu,
- rxs, &fcs_err);
+ mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mon_mpdu, ppduinfo, rxs);
if (!mon_skb)
goto mon_deliver_fail;
header = mon_skb;
rxs->flag = 0;
- if (fcs_err)
+ if (mon_mpdu->err_bitmap & HAL_RX_MPDU_ERR_FCS)
rxs->flag = RX_FLAG_FAILED_FCS_CRC;
do {
@@ -2070,8 +2355,12 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
}
rxs->flag |= RX_FLAG_ONLY_MONITOR;
+
+ if (!(rxs->flag & RX_FLAG_ONLY_MONITOR))
+ decap = mon_mpdu->decap_format;
+
ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
- ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs);
+ ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs, decap);
mon_skb = skb_next;
} while (mon_skb);
rxs->flag = 0;
@@ -2079,7 +2368,7 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
return 0;
mon_deliver_fail:
- mon_skb = head_msdu;
+ mon_skb = mon_mpdu->head;
while (mon_skb) {
skb_next = mon_skb->next;
dev_kfree_skb_any(mon_skb);
@@ -2088,6 +2377,133 @@ mon_deliver_fail:
return -EINVAL;
}
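+/* Force the skb to exactly @len bytes: trim it when longer, otherwise
+ * grow the tailroom with pskb_expand_head() as needed before the
+ * skb_put(). Returns -ENOMEM when the expansion fails.
+ */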
+static int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
+{
+ if (skb->len > len) {
+ skb_trim(skb, len);
+ } else {
+ if (skb_tailroom(skb) < len - skb->len) {
+ if ((pskb_expand_head(skb, 0,
+ len - skb->len - skb_tailroom(skb),
+ GFP_ATOMIC))) {
+ return -ENOMEM;
+ }
+ }
+ skb_put(skb, (len - skb->len));
+ }
+
+ return 0;
+}
+
+/* Hardware fills buffers in 128-byte aligned units, so they must be
+ * reaped with 128-byte alignment as well.
+ */
+#define RXDMA_DATA_DMA_BLOCK_SIZE 128
+
+static void
+ath12k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
+ bool *is_frag, u32 *total_len,
+ u32 *frag_len, u32 *msdu_cnt)
+{
+ if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
+ *is_frag = true;
+ *frag_len = (RX_MON_STATUS_BASE_BUF_SIZE -
+ sizeof(struct hal_rx_desc)) &
+ ~(RXDMA_DATA_DMA_BLOCK_SIZE - 1);
+ *total_len += *frag_len;
+ } else {
+ if (*is_frag)
+ *frag_len = info->msdu_len - *total_len;
+ else
+ *frag_len = info->msdu_len;
+
+ *msdu_cnt -= 1;
+ }
+}
+
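+/* Resolve the buffer cookie from a BUF_ADDR TLV to its skb, unmap it,
+ * size it to the DMA length plus the 802.11 offset and link it into
+ * the MPDU currently being assembled, then replenish the buffer ring.
+ */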
+static int
+ath12k_dp_mon_parse_status_buf(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ const struct dp_mon_packet_info *packet_info)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
+ struct sk_buff *msdu;
+ int buf_id;
+ u32 offset;
+
+ buf_id = u32_get_bits(packet_info->cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!msdu)) {
+ ath12k_warn(ab, "mon dest desc with inval buf_id %d\n", buf_id);
+ return 0;
+ }
+
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(msdu)->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ offset = packet_info->dma_length + ATH12K_MON_RX_DOT11_OFFSET;
+ if (ath12k_dp_pkt_set_pktlen(msdu, offset)) {
+ dev_kfree_skb_any(msdu);
+ goto dest_replenish;
+ }
+
+ if (!pmon->mon_mpdu->head)
+ pmon->mon_mpdu->head = msdu;
+ else
+ pmon->mon_mpdu->tail->next = msdu;
+
+ pmon->mon_mpdu->tail = msdu;
+
+dest_replenish:
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+
+ return 0;
+}
+
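+/* Drive MPDU assembly from the parsed status TLVs: MPDU_START
+ * allocates a fresh mon_mpdu, BUF_ADDR appends the referenced MSDU to
+ * it, MSDU_END latches the cached decap format and error bitmap, and
+ * MPDU_END moves the completed MPDU onto dp_rx_mon_mpdu_list.
+ */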
+static int
+ath12k_dp_mon_parse_rx_dest_tlv(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ enum hal_rx_mon_status hal_status,
+ const void *tlv_data)
+{
+ switch (hal_status) {
+ case HAL_RX_MON_STATUS_MPDU_START:
+ if (WARN_ON_ONCE(pmon->mon_mpdu))
+ break;
+
+ pmon->mon_mpdu = kzalloc(sizeof(*pmon->mon_mpdu), GFP_ATOMIC);
+ if (!pmon->mon_mpdu)
+ return -ENOMEM;
+ break;
+ case HAL_RX_MON_STATUS_BUF_ADDR:
+ return ath12k_dp_mon_parse_status_buf(ar, pmon, tlv_data);
+ case HAL_RX_MON_STATUS_MPDU_END:
+ /* If the MPDU contains no MSDUs, free the empty MPDU */
+ if (pmon->mon_mpdu->tail) {
+ pmon->mon_mpdu->tail->next = NULL;
+ list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
+ } else {
+ kfree(pmon->mon_mpdu);
+ }
+ pmon->mon_mpdu = NULL;
+ break;
+ case HAL_RX_MON_STATUS_MSDU_END:
+ pmon->mon_mpdu->decap_format = pmon->decap_format;
+ pmon->mon_mpdu->err_bitmap = pmon->err_bitmap;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static enum hal_rx_mon_status
ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon,
struct sk_buff *skb)
@@ -2114,14 +2530,20 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon,
tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv);
+
+ if (ar->monitor_started && ar->ab->hw_params->rxdma1_enable &&
+ ath12k_dp_mon_parse_rx_dest_tlv(ar, pmon, hal_status, tlv->value))
+ return HAL_RX_MON_STATUS_PPDU_DONE;
+
ptr += sizeof(*tlv) + tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);
- if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+ if ((ptr - skb->data) > skb->len)
break;
} while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) ||
(hal_status == HAL_RX_MON_STATUS_BUF_ADDR) ||
+ (hal_status == HAL_RX_MON_STATUS_MPDU_START) ||
(hal_status == HAL_RX_MON_STATUS_MPDU_END) ||
(hal_status == HAL_RX_MON_STATUS_MSDU_END));
@@ -2141,23 +2563,21 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
struct dp_mon_mpdu *tmp;
struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
- struct sk_buff *head_msdu, *tail_msdu;
- enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+ enum hal_rx_mon_status hal_status;
- ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+ hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+ if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE)
+ return hal_status;
list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
- head_msdu = mon_mpdu->head;
- tail_msdu = mon_mpdu->tail;
- if (head_msdu && tail_msdu) {
- ath12k_dp_mon_rx_deliver(ar, head_msdu,
- tail_msdu, ppdu_info, napi);
- }
+ if (mon_mpdu->head && mon_mpdu->tail)
+ ath12k_dp_mon_rx_deliver(ar, mon_mpdu, ppdu_info, napi);
kfree(mon_mpdu);
}
+
return hal_status;
}
@@ -2236,6 +2656,94 @@ fail_alloc_skb:
return -ENOMEM;
}
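+/* Refill the monitor status ring with up to @req_entries freshly
+ * allocated, aligned and DMA-mapped buffers; when no count is
+ * requested, the ring is topped up once more than three quarters of
+ * its entries are free. Returns the number of buffers replenished.
+ */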
+int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring,
+ int req_entries)
+{
+ enum hal_rx_buf_return_buf_manager mgr =
+ ab->hw_params->hal_params->rx_buf_rbm;
+ int num_free, num_remain, buf_id;
+ struct ath12k_buffer_addr *desc;
+ struct hal_srng *srng;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ u32 cookie;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
+ if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+ req_entries = num_free;
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE);
+ if (!skb)
+ break;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ RX_MON_STATUS_BUF_ALIGN)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max * 3, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (buf_id < 0)
+ goto fail_dma_unmap;
+ cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_buf_unassign;
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+
+ num_remain--;
+
+ ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_buf_unassign:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
static struct dp_mon_tx_ppdu_info *
ath12k_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon,
unsigned int ppdu_id,
@@ -2838,16 +3346,13 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar,
struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
struct dp_mon_mpdu *tmp, *mon_mpdu;
- struct sk_buff *head_msdu, *tail_msdu;
list_for_each_entry_safe(mon_mpdu, tmp,
&tx_ppdu_info->dp_tx_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
- head_msdu = mon_mpdu->head;
- tail_msdu = mon_mpdu->tail;
- if (head_msdu)
- ath12k_dp_mon_rx_deliver(ar, head_msdu, tail_msdu,
+ if (mon_mpdu->head)
+ ath12k_dp_mon_rx_deliver(ar, mon_mpdu,
&tx_ppdu_info->rx_status, napi);
kfree(mon_mpdu);
@@ -2949,11 +3454,10 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats;
u32 num_msdu;
- if (!rx_stats)
- return;
-
arsta->rssi_comb = ppdu_info->rssi_comb;
ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+ if (!rx_stats)
+ return;
num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
@@ -3126,14 +3630,12 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
ahsta = ath12k_sta_to_ahsta(peer->sta);
arsta = &ahsta->deflink;
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
rx_stats = arsta->rx_stats;
-
if (!rx_stats)
return;
- arsta->rssi_comb = ppdu_info->rssi_comb;
- ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
-
num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count +
user_stats->udp_msdu_count + user_stats->other_msdu_count;
@@ -3346,7 +3848,7 @@ move_next:
ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
while ((skb = __skb_dequeue(&skb_list))) {
- hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+ hal_status = ath12k_dp_mon_rx_parse_mon_status(ar, pmon, skb, napi);
if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
ppdu_info->ppdu_continuation = true;
dev_kfree_skb_any(skb);
@@ -3388,6 +3890,487 @@ free_skb:
return num_buffs_reaped;
}
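+/* Reap completed entries from the monitor status ring into @skb_list.
+ * A buffer missing its HAL_RX_STATUS_BUFFER_DONE tag is either polled
+ * again or discarded based on the state of the following entry, and
+ * every consumed descriptor is handed a fresh buffer before the ring
+ * pointer advances.
+ */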
+static int ath12k_dp_rx_reap_mon_status_ring(struct ath12k_base *ab, int mac_id,
+ int *budget, struct sk_buff_head *skb_list)
+{
+ const struct ath12k_hw_hal_params *hal_params;
+ int buf_id, srng_id, num_buffs_reaped = 0;
+ enum dp_mon_status_buf_state reap_status;
+ struct dp_rxdma_mon_ring *rx_ring;
+ struct ath12k_mon_data *pmon;
+ struct ath12k_skb_rxcb *rxcb;
+ struct hal_tlv_64_hdr *tlv;
+ void *rx_mon_status_desc;
+ struct hal_srng *srng;
+ struct ath12k_dp *dp;
+ struct sk_buff *skb;
+ struct ath12k *ar;
+ dma_addr_t paddr;
+ u32 cookie;
+ u8 rbm;
+
+ ar = ab->pdevs[ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id)].ar;
+ dp = &ab->dp;
+ pmon = &ar->dp.mon_data;
+ srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+ rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (*budget) {
+ *budget -= 1;
+ rx_mon_status_desc = ath12k_hal_srng_src_peek(ab, srng);
+ if (!rx_mon_status_desc) {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ break;
+ }
+ ath12k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
+ &cookie, &rbm);
+ if (paddr) {
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb) {
+ ath12k_warn(ab, "rx monitor status with invalid buf_id %d\n",
+ buf_id);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_64_hdr *)skb->data;
+ if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) !=
+ HAL_RX_STATUS_BUFFER_DONE) {
+ pmon->buf_state = DP_MON_STATUS_NO_DMA;
+ ath12k_warn(ab,
+ "mon status DONE not set %llx, buf_id %d\n",
+ le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG),
+ buf_id);
+ /* The RxDMA status done bit might not be set even
+ * though the tail pointer (tp) has been moved by HW.
+ */
+
+ /* If the done status is missing:
+ * 1. As per the MAC team's suggestion, when the
+ * HP + 1 entry is peeked and its DMA is not done,
+ * but the HP + 2 entry's DMA done bit is set,
+ * skip the HP + 1 entry and start processing it
+ * in the next interrupt.
+ * 2. If the HP + 2 entry's DMA done bit is not set,
+ * poll on the HP + 1 entry until its DMA done bit
+ * is set, i.e. check the same buffer again on the
+ * next dp_rx_mon_status_srng_process call.
+ */
+ reap_status = ath12k_dp_rx_mon_buf_done(ab, srng,
+ rx_ring);
+ if (reap_status == DP_MON_STATUS_NO_DMA)
+ continue;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
+ }
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ if (ath12k_dp_pkt_set_pktlen(skb, RX_MON_STATUS_BUF_SIZE)) {
+ dev_kfree_skb_any(skb);
+ goto move_next;
+ }
+ __skb_queue_tail(skb_list, skb);
+ } else {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ }
+move_next:
+ skb = ath12k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id);
+
+ if (!skb) {
+ ath12k_warn(ab, "failed to alloc buffer for status ring\n");
+ hal_params = ab->hw_params->hal_params;
+ ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
+ hal_params->rx_buf_rbm);
+ num_buffs_reaped++;
+ break;
+ }
+ rxcb = ATH12K_SKB_RXCB(skb);
+
+ cookie = u32_encode_bits(mac_id, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
+ u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
+ cookie,
+ ab->hw_params->hal_params->rx_buf_rbm);
+ ath12k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return num_buffs_reaped;
+}
+
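+/* Pop one MPDU from a REO entrance ring entry by walking its chain of
+ * MSDU link descriptors, unmapping each MSDU and linking it into a
+ * head/tail list. RXDMA error codes and duplicate link descriptors or
+ * cookies cause the MPDU to be dropped. Returns the number of rx
+ * buffers consumed.
+ */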
+static u32
+ath12k_dp_rx_mon_mpdu_pop(struct ath12k *ar, int mac_id,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu,
+ struct list_head *used_list,
+ u32 *npackets, u32 *ppdu_id)
+{
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+ struct ath12k_buffer_addr *p_buf_addr_info, *p_last_buf_addr_info;
+ u32 msdu_ppdu_id = 0, msdu_cnt = 0, total_len = 0, frag_len = 0;
+ u32 rx_buf_size, rx_pkt_offset, sw_cookie;
+ bool is_frag, is_first_msdu, drop_mpdu = false;
+ struct hal_reo_entrance_ring *ent_desc =
+ (struct hal_reo_entrance_ring *)ring_entry;
+ u32 rx_bufs_used = 0, i = 0, desc_bank = 0;
+ struct hal_rx_desc *rx_desc, *tail_rx_desc;
+ struct hal_rx_msdu_link *msdu_link_desc;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_buffer_addr buf_info;
+ struct hal_rx_msdu_list msdu_list;
+ struct ath12k_skb_rxcb *rxcb;
+ u16 num_msdus = 0;
+ dma_addr_t paddr;
+ u8 rbm;
+
+ ath12k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
+ &sw_cookie,
+ &p_last_buf_addr_info, &rbm,
+ &msdu_cnt);
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ if (le32_get_bits(ent_desc->info1,
+ HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ u8 rxdma_err = le32_get_bits(ent_desc->info1,
+ HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+
+ do {
+ if (pmon->mon_last_linkdesc_paddr == paddr) {
+ pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
+ spin_unlock_bh(&pmon->mon_lock);
+ return rx_bufs_used;
+ }
+
+ desc_bank = u32_get_bits(sw_cookie, DP_LINK_DESC_BANK_MASK);
+ msdu_link_desc =
+ ar->ab->dp.link_desc_banks[desc_bank].vaddr +
+ (paddr - ar->ab->dp.link_desc_banks[desc_bank].paddr);
+
+ ath12k_hal_rx_msdu_list_get(ar, msdu_link_desc, &msdu_list,
+ &num_msdus);
+ desc_info = ath12k_dp_get_rx_desc(ar->ab,
+ msdu_list.sw_cookie[num_msdus - 1]);
+ tail_rx_desc = (struct hal_rx_desc *)(desc_info->skb)->data;
+
+ for (i = 0; i < num_msdus; i++) {
+ u32 l2_hdr_offset;
+
+ if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "i %d last_cookie %d is same\n",
+ i, pmon->mon_last_buf_cookie);
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dup_mon_buf_cnt++;
+ continue;
+ }
+
+ desc_info =
+ ath12k_dp_get_rx_desc(ar->ab, msdu_list.sw_cookie[i]);
+ msdu = desc_info->skb;
+
+ if (!msdu) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "msdu_pop: invalid msdu (%d/%d)\n",
+ i + 1, num_msdus);
+ goto next_msdu;
+ }
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ if (rxcb->paddr != msdu_list.paddr[i]) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "i %d paddr %lx != %lx\n",
+ i, (unsigned long)rxcb->paddr,
+ (unsigned long)msdu_list.paddr[i]);
+ drop_mpdu = true;
+ continue;
+ }
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "i %d drop msdu %p *ppdu_id %x\n",
+ i, msdu, *ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, tail_rx_desc);
+ if (is_first_msdu) {
+ if (!ath12k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ pmon->mon_last_linkdesc_paddr = paddr;
+ goto next_msdu;
+ }
+ msdu_ppdu_id =
+ ath12k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
+
+ if (ath12k_dp_mon_comp_ppduid(msdu_ppdu_id,
+ ppdu_id)) {
+ spin_unlock_bh(&pmon->mon_lock);
+ return rx_bufs_used;
+ }
+ pmon->mon_last_linkdesc_paddr = paddr;
+ is_first_msdu = false;
+ }
+ ath12k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+ if (ath12k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
+ dev_kfree_skb_any(msdu);
+ goto next_msdu;
+ }
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
+ rx_bufs_used++;
+ desc_info->skb = NULL;
+ list_add_tail(&desc_info->list, used_list);
+ }
+
+ ath12k_hal_rx_buf_addr_info_set(&buf_info, paddr, sw_cookie, rbm);
+
+ ath12k_dp_mon_next_link_desc_get(msdu_link_desc, &paddr,
+ &sw_cookie, &rbm,
+ &p_buf_addr_info);
+
+ ath12k_dp_rx_link_desc_return(ar->ab, &buf_info,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (paddr && msdu_cnt);
+
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ if (msdu_cnt == 0)
+ *npackets = 1;
+
+ return rx_bufs_used;
+}
+
+/* Destination ring processing is considered stuck when the destination
+ * ring does not move while the status ring moves 16 PPDUs. As a
+ * workaround, destination ring processing skips this PPDU.
+ */
+#define MON_DEST_RING_STUCK_MAX_CNT 16
+
+static void ath12k_dp_rx_mon_dest_process(struct ath12k *ar, int mac_id,
+ u32 quota, struct napi_struct *napi)
+{
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+ struct ath12k_pdev_mon_stats *rx_mon_stats;
+ u32 ppdu_id, rx_bufs_used = 0, ring_id;
+ u32 mpdu_rx_bufs_used, npackets = 0;
+ struct ath12k_dp *dp = &ar->ab->dp;
+ struct ath12k_base *ab = ar->ab;
+ void *ring_entry, *mon_dst_srng;
+ struct dp_mon_mpdu *tmp_mpdu;
+ LIST_HEAD(rx_desc_used_list);
+ struct hal_srng *srng;
+
+ ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
+ srng = &ab->hal.srng_list[ring_id];
+
+ mon_dst_srng = &ab->hal.srng_list[ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, mon_dst_srng);
+
+ ppdu_id = pmon->mon_ppdu_info.ppdu_id;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ while ((ring_entry = ath12k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ struct sk_buff *head_msdu, *tail_msdu;
+
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ mpdu_rx_bufs_used = ath12k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
+ &head_msdu, &tail_msdu,
+ &rx_desc_used_list,
+ &npackets, &ppdu_id);
+
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (mpdu_rx_bufs_used) {
+ dp->mon_dest_ring_stuck_cnt = 0;
+ } else {
+ dp->mon_dest_ring_stuck_cnt++;
+ rx_mon_stats->dest_mon_not_reaped++;
+ }
+
+ if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
+ rx_mon_stats->dest_mon_stuck++;
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
+ pmon->mon_ppdu_info.ppdu_id, ppdu_id,
+ dp->mon_dest_ring_stuck_cnt,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ spin_lock_bh(&pmon->mon_lock);
+ pmon->mon_ppdu_info.ppdu_id = ppdu_id;
+ spin_unlock_bh(&pmon->mon_lock);
+ continue;
+ }
+
+ if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
+ spin_lock_bh(&pmon->mon_lock);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ spin_unlock_bh(&pmon->mon_lock);
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
+ ppdu_id, pmon->mon_ppdu_info.ppdu_id,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ break;
+ }
+
+ if (head_msdu && tail_msdu) {
+ tmp_mpdu = kzalloc(sizeof(*tmp_mpdu), GFP_ATOMIC);
+ if (!tmp_mpdu)
+ break;
+
+ tmp_mpdu->head = head_msdu;
+ tmp_mpdu->tail = tail_msdu;
+ tmp_mpdu->err_bitmap = pmon->err_bitmap;
+ tmp_mpdu->decap_format = pmon->decap_format;
+ ath12k_dp_mon_rx_deliver(ar, tmp_mpdu,
+ &pmon->mon_ppdu_info, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ kfree(tmp_mpdu);
+ }
+
+ ring_entry = ath12k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ }
+ ath12k_hal_srng_access_end(ar->ab, mon_dst_srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (rx_bufs_used) {
+ rx_mon_stats->dest_ppdu_done++;
+ ath12k_dp_rx_bufs_replenish(ar->ab,
+ &dp->rx_refill_buf_ring,
+ &rx_desc_used_list,
+ rx_bufs_used);
+ }
+}
+
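+/* Status ring driven monitor processing for chips without rxdma1:
+ * reap and parse status buffers one PPDU at a time, and when a PPDU
+ * completes, process the corresponding destination ring entries via
+ * ath12k_dp_rx_mon_dest_process().
+ */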
+static int
+__ath12k_dp_mon_process_ring(struct ath12k *ar, int mac_id,
+ struct napi_struct *napi, int *budget)
+{
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+ struct ath12k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff_head skb_list;
+ int num_buffs_reaped;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&skb_list);
+
+ num_buffs_reaped = ath12k_dp_rx_reap_mon_status_ring(ar->ab, mac_id,
+ budget, &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+
+ hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+
+ if (ar->monitor_started &&
+ pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+ hal_status == HAL_TLV_STATUS_PPDU_DONE) {
+ rx_mon_stats->status_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
+ ath12k_dp_rx_mon_dest_process(ar, mac_id, *budget, napi);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+exit:
+ return num_buffs_reaped;
+}
+
int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode)
@@ -3398,6 +4381,10 @@ int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
if (ab->hw_params->rxdma1_enable) {
if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
num_buffs_reaped = ath12k_dp_mon_srng_process(ar, &budget, napi);
+ } else {
+ if (ar->monitor_started)
+ num_buffs_reaped =
+ __ath12k_dp_mon_process_ring(ar, mac_id, napi, &budget);
}
return num_buffs_reaped;