Diffstat (limited to 'drivers/net/wireless/ath/ath12k/dp_mon.c')
-rw-r--r-- | drivers/net/wireless/ath/ath12k/dp_mon.c | 2494
1 file changed, 2170 insertions(+), 324 deletions(-)
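The heart of the patch is the ATH12K_LE32_DEC_ENC/ATH12K_LE64_DEC_ENC pair defined at the top of the diff: each reads one bitfield out of a little-endian hardware descriptor word and immediately re-encodes the value at a different bit position in a radiotap word. Below is a minimal standalone sketch of that decode/re-encode pattern; the get_bits()/encode_bits() helpers are simplified userspace stand-ins for the kernel's le32_get_bits()/u32_encode_bits() from <linux/bitfield.h>, and the two mask layouts are invented for illustration, not taken from the driver headers.

/* Sketch of the field decode/re-encode pattern behind ATH12K_LE32_DEC_ENC.
 * The helpers and masks below are illustrative stand-ins, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

/* extract a field: mask, then shift down by the mask's lowest set bit */
static uint32_t get_bits(uint32_t word, uint32_t mask)
{
	return (word & mask) / (mask & -mask);
}

/* encode a value into a field position */
static uint32_t encode_bits(uint32_t value, uint32_t mask)
{
	return (value * (mask & -mask)) & mask;
}

/* hypothetical layouts: BSS color in bits 7:3 of the HW descriptor word,
 * bits 12:8 of the radiotap U-SIG common word
 */
#define HW_USIG_CMN_BSS_COLOR    GENMASK(7, 3)
#define RTAP_USIG_CMN_BSS_COLOR  GENMASK(12, 8)

int main(void)
{
	uint32_t hw_word = 0x000000a8; /* BSS color 21 in bits 7:3 */
	uint32_t rtap = 0;

	/* the DEC_ENC idiom: decode from one layout, encode into another */
	rtap |= encode_bits(get_bits(hw_word, HW_USIG_CMN_BSS_COLOR),
			    RTAP_USIG_CMN_BSS_COLOR);

	printf("bss color %u -> radiotap word 0x%08x\n",
	       get_bits(hw_word, HW_USIG_CMN_BSS_COLOR), rtap);
	return 0;
}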
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c index 5a21961cfd46..28cadc4167f7 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "dp_mon.h" @@ -10,6 +10,12 @@ #include "dp_tx.h" #include "peer.h" +#define ATH12K_LE32_DEC_ENC(value, dec_bits, enc_bits) \ + u32_encode_bits(le32_get_bits(value, dec_bits), enc_bits) + +#define ATH12K_LE64_DEC_ENC(value, dec_bits, enc_bits) \ + u32_encode_bits(le64_get_bits(value, dec_bits), enc_bits) + static void ath12k_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user, struct hal_rx_user_status *rx_user_status) @@ -75,7 +81,7 @@ ath12k_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats * static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig, struct hal_rx_mon_ppdu_info *ppdu_info) { - u32 nsts, group_id, info0, info1; + u32 nsts, info0, info1; u8 gi_setting; info0 = __le32_to_cpu(vht_sig->info0); @@ -103,12 +109,8 @@ static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vh ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW); ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED); - group_id = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID); - if (group_id == 0 || group_id == 63) - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; - else - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; - ppdu_info->vht_flag_values5 = group_id; + ppdu_info->vht_flag_values5 = u32_get_bits(info0, + HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID); ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) | ppdu_info->nss); ppdu_info->vht_flag_values2 = ppdu_info->bw; @@ -128,7 +130,6 @@ static void ath12k_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig, ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING); ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI); ppdu_info->nss = (ppdu_info->mcs >> 3); - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; } static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb, @@ -160,7 +161,6 @@ static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb, ppdu_info->rate = rate; ppdu_info->cck_flag = 1; - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; } static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga, @@ -200,7 +200,6 @@ static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga, } ppdu_info->rate = rate; - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; } static void @@ -237,7 +236,6 @@ ath12k_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *of ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS); ppdu_info->beamformed = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF); - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; } static void @@ -277,7 +275,6 @@ ath12k_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION); ppdu_info->ru_alloc = 
ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones); ppdu_info->he_RU[0] = ru_tones; - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; } static void @@ -411,7 +408,6 @@ ath12k_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_ ppdu_info->is_stbc = info1 & HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC; - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; } static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a, @@ -559,17 +555,921 @@ static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info * dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM); ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS); ppdu_info->dcm = dcm; - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; +} + +static void +ath12k_dp_mon_hal_rx_parse_u_sig_cmn(const struct hal_mon_usig_cmn *cmn, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + u32 common; + + ppdu_info->u_sig_info.bw = le32_get_bits(cmn->info0, + HAL_RX_USIG_CMN_INFO0_BW); + ppdu_info->u_sig_info.ul_dl = le32_get_bits(cmn->info0, + HAL_RX_USIG_CMN_INFO0_UL_DL); + + common = __le32_to_cpu(ppdu_info->u_sig_info.usig.common); + common |= IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN | + ATH12K_LE32_DEC_ENC(cmn->info0, + HAL_RX_USIG_CMN_INFO0_PHY_VERSION, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) | + u32_encode_bits(ppdu_info->u_sig_info.bw, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) | + u32_encode_bits(ppdu_info->u_sig_info.ul_dl, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL) | + ATH12K_LE32_DEC_ENC(cmn->info0, + HAL_RX_USIG_CMN_INFO0_BSS_COLOR, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR) | + ATH12K_LE32_DEC_ENC(cmn->info0, + HAL_RX_USIG_CMN_INFO0_TXOP, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); + ppdu_info->u_sig_info.usig.common = cpu_to_le32(common); + + switch (ppdu_info->u_sig_info.bw) { + default: + fallthrough; + case HAL_EHT_BW_20: + ppdu_info->bw = HAL_RX_BW_20MHZ; + break; + case HAL_EHT_BW_40: + ppdu_info->bw = HAL_RX_BW_40MHZ; + break; + case HAL_EHT_BW_80: + ppdu_info->bw = HAL_RX_BW_80MHZ; + break; + case HAL_EHT_BW_160: + ppdu_info->bw = HAL_RX_BW_160MHZ; + break; + case HAL_EHT_BW_320_1: + case HAL_EHT_BW_320_2: + ppdu_info->bw = HAL_RX_BW_320MHZ; + break; + } +} + +static void +ath12k_dp_mon_hal_rx_parse_u_sig_tb(const struct hal_mon_usig_tb *usig_tb, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig; + enum ieee80211_radiotap_eht_usig_tb spatial_reuse1, spatial_reuse2; + u32 common, value, mask; + + spatial_reuse1 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1; + spatial_reuse2 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2; + + common = __le32_to_cpu(usig->common); + value = __le32_to_cpu(usig->value); + mask = __le32_to_cpu(usig->mask); + + ppdu_info->u_sig_info.ppdu_type_comp_mode = + le32_get_bits(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE); + + common |= ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); + + value |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD | + u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE) | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE | + 
ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1, + spatial_reuse1) | + ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2, + spatial_reuse2) | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD | + ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_CRC, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC) | + ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_TAIL, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL); + + mask |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE | + spatial_reuse1 | spatial_reuse2 | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL; + + usig->common = cpu_to_le32(common); + usig->value = cpu_to_le32(value); + usig->mask = cpu_to_le32(mask); +} + +static void +ath12k_dp_mon_hal_rx_parse_u_sig_mu(const struct hal_mon_usig_mu *usig_mu, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig; + enum ieee80211_radiotap_eht_usig_mu sig_symb, punc; + u32 common, value, mask; + + sig_symb = IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS; + punc = IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO; + + common = __le32_to_cpu(usig->common); + value = __le32_to_cpu(usig->value); + mask = __le32_to_cpu(usig->mask); + + ppdu_info->u_sig_info.ppdu_type_comp_mode = + le32_get_bits(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE); + ppdu_info->u_sig_info.eht_sig_mcs = + le32_get_bits(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS); + ppdu_info->u_sig_info.num_eht_sig_sym = + le32_get_bits(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM); + + common |= ATH12K_LE32_DEC_ENC(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); + + value |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD | + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE | + u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE) | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE | + ATH12K_LE32_DEC_ENC(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO, + punc) | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE | + u32_encode_bits(ppdu_info->u_sig_info.eht_sig_mcs, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS) | + u32_encode_bits(ppdu_info->u_sig_info.num_eht_sig_sym, + sig_symb) | + ATH12K_LE32_DEC_ENC(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_CRC, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC) | + ATH12K_LE32_DEC_ENC(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_TAIL, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL); + + mask |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD | + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE | + punc | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS | + sig_symb | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL; + + usig->common = cpu_to_le32(common); + usig->value = cpu_to_le32(value); + usig->mask = cpu_to_le32(mask); +} + +static void +ath12k_dp_mon_hal_rx_parse_u_sig_hdr(const struct hal_mon_usig_hdr *usig, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + u8 comp_mode; + + ppdu_info->eht_usig = true; + + 
ath12k_dp_mon_hal_rx_parse_u_sig_cmn(&usig->cmn, ppdu_info); + + comp_mode = le32_get_bits(usig->non_cmn.mu.info0, + HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE); + + if (comp_mode == 0 && ppdu_info->u_sig_info.ul_dl) + ath12k_dp_mon_hal_rx_parse_u_sig_tb(&usig->non_cmn.tb, ppdu_info); + else + ath12k_dp_mon_hal_rx_parse_u_sig_mu(&usig->non_cmn.mu, ppdu_info); +} + +static void +ath12k_dp_mon_hal_aggr_tlv(struct hal_rx_mon_ppdu_info *ppdu_info, + u16 tlv_len, const void *tlv_data) +{ + if (tlv_len <= HAL_RX_MON_MAX_AGGR_SIZE - ppdu_info->tlv_aggr.cur_len) { + memcpy(ppdu_info->tlv_aggr.buf + ppdu_info->tlv_aggr.cur_len, + tlv_data, tlv_len); + ppdu_info->tlv_aggr.cur_len += tlv_len; + } +} + +static inline bool +ath12k_dp_mon_hal_rx_is_frame_type_ndp(const struct hal_rx_u_sig_info *usig_info) +{ + if (usig_info->ppdu_type_comp_mode == 1 && + usig_info->eht_sig_mcs == 0 && + usig_info->num_eht_sig_sym == 0) + return true; + + return false; +} + +static inline bool +ath12k_dp_mon_hal_rx_is_non_ofdma(const struct hal_rx_u_sig_info *usig_info) +{ + u32 ppdu_type_comp_mode = usig_info->ppdu_type_comp_mode; + u32 ul_dl = usig_info->ul_dl; + + if ((ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 0) || + (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_OFDMA && ul_dl == 0) || + (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 1)) + return true; + + return false; +} + +static inline bool +ath12k_dp_mon_hal_rx_is_ofdma(const struct hal_rx_u_sig_info *usig_info) +{ + if (usig_info->ppdu_type_comp_mode == 0 && usig_info->ul_dl == 0) + return true; + + return false; +} + +static void +ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(const struct hal_eht_sig_ndp_cmn_eb *eht_sig_ndp, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE | + IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF | + IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S | + IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S | + IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_S | + IEEE80211_RADIOTAP_EHT_KNOWN_CRC1 | + IEEE80211_RADIOTAP_EHT_KNOWN_TAIL1; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[0]); + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE, + IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); + /* GI and LTF size are separately indicated in radiotap header + * and hence will be parsed from other TLV + */ + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); + + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC, + IEEE80211_RADIOTAP_EHT_DATA0_CRC1_O); + + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD, + IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_S); + eht->data[0] = cpu_to_le32(data); + + data = __le32_to_cpu(eht->data[7]); + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS, + IEEE80211_RADIOTAP_EHT_DATA7_NSS_S); + + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED, + IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S); + eht->data[7] = cpu_to_le32(data); +} + +static void +ath12k_dp_mon_hal_rx_parse_usig_overflow(const struct hal_eht_sig_usig_overflow *ovflow, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + u32 known, data; + + known = 
__le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE | + IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF | + IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_O; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[0]); + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE, + IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); + + /* GI and LTF size are separately indicated in radiotap header + * and hence will be parsed from other TLV + */ + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); + + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM); + + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR, + IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM); + + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY, + IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM); + + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD, + IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_O); + eht->data[0] = cpu_to_le32(data); +} + +static void +ath12k_dp_mon_hal_rx_parse_non_ofdma_users(const struct hal_eht_sig_non_ofdma_cmn_eb *eb, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[7]); + data |= ATH12K_LE32_DEC_ENC(eb->info0, + HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS, + IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS); + eht->data[7] = cpu_to_le32(data); +} + +static void +ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(const struct hal_eht_sig_mu_mimo *user, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info; + u32 user_idx; + + if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info)) + return; + + user_idx = eht_info->num_user_info++; + + eht_info->user_info[user_idx] |= + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_KNOWN_M | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID, + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING, + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS, + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING, + IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_M); + + ppdu_info->mcs = le32_get_bits(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS); +} + +static void +ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(const struct hal_eht_sig_non_mu_mimo *user, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info; + u32 user_idx; + + if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info)) + return; + + user_idx = eht_info->num_user_info++; + + eht_info->user_info[user_idx] |= + 
IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O | + IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID, + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING, + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS, + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS, + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED, + IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O); + + ppdu_info->mcs = le32_get_bits(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS); + + ppdu_info->nss = le32_get_bits(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS) + 1; +} + +static inline bool +ath12k_dp_mon_hal_rx_is_mu_mimo_user(const struct hal_rx_u_sig_info *usig_info) +{ + if (usig_info->ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_SU && + usig_info->ul_dl == 1) + return true; + + return false; +} + +static void +ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(const void *tlv, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + const struct hal_eht_sig_non_ofdma_cmn_eb *eb = tlv; + + ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info); + ath12k_dp_mon_hal_rx_parse_non_ofdma_users(eb, ppdu_info); + + if (ath12k_dp_mon_hal_rx_is_mu_mimo_user(&ppdu_info->u_sig_info)) + ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(&eb->user_field.mu_mimo, + ppdu_info); + else + ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&eb->user_field.n_mu_mimo, + ppdu_info); +} + +static void +ath12k_dp_mon_hal_rx_parse_ru_allocation(const struct hal_eht_sig_ofdma_cmn_eb *eb, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + const struct hal_eht_sig_ofdma_cmn_eb1 *ofdma_cmn_eb1 = &eb->eb1; + const struct hal_eht_sig_ofdma_cmn_eb2 *ofdma_cmn_eb2 = &eb->eb2; + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + enum ieee80211_radiotap_eht_data ru_123, ru_124, ru_125, ru_126; + enum ieee80211_radiotap_eht_data ru_121, ru_122, ru_112, ru_111; + u32 data; + + ru_123 = IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3; + ru_124 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4; + ru_125 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5; + ru_126 = IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6; + ru_121 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1; + ru_122 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2; + ru_112 = IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2; + ru_111 = IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1; + + switch (ppdu_info->u_sig_info.bw) { + case HAL_EHT_BW_320_2: + case HAL_EHT_BW_320_1: + data = __le32_to_cpu(eht->data[4]); + /* CC1 2::3 */ + data |= IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3, + ru_123); + eht->data[4] = cpu_to_le32(data); + + data = __le32_to_cpu(eht->data[5]); + /* CC1 2::4 */ + data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4, + ru_124); + + /* CC1 2::5 */ + data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + 
HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5, + ru_125); + eht->data[5] = cpu_to_le32(data); + + data = __le32_to_cpu(eht->data[6]); + /* CC1 2::6 */ + data |= IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6, + ru_126); + eht->data[6] = cpu_to_le32(data); + + fallthrough; + case HAL_EHT_BW_160: + data = __le32_to_cpu(eht->data[3]); + /* CC1 2::1 */ + data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1, + ru_121); + /* CC1 2::2 */ + data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2, + ru_122); + eht->data[3] = cpu_to_le32(data); + + fallthrough; + case HAL_EHT_BW_80: + data = __le32_to_cpu(eht->data[2]); + /* CC1 1::2 */ + data |= IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0, + HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2, + ru_112); + eht->data[2] = cpu_to_le32(data); + + fallthrough; + case HAL_EHT_BW_40: + fallthrough; + case HAL_EHT_BW_20: + data = __le32_to_cpu(eht->data[1]); + /* CC1 1::1 */ + data |= IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0, + HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1, + ru_111); + eht->data[1] = cpu_to_le32(data); + break; + default: + break; + } +} + +static void +ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(const void *tlv, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + const struct hal_eht_sig_ofdma_cmn_eb *ofdma = tlv; + + ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info); + ath12k_dp_mon_hal_rx_parse_ru_allocation(ofdma, ppdu_info); + + ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&ofdma->user_field.n_mu_mimo, + ppdu_info); +} + +static void +ath12k_dp_mon_parse_eht_sig_hdr(struct hal_rx_mon_ppdu_info *ppdu_info, + const void *tlv_data) +{ + ppdu_info->is_eht = true; + + if (ath12k_dp_mon_hal_rx_is_frame_type_ndp(&ppdu_info->u_sig_info)) + ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(tlv_data, ppdu_info); + else if (ath12k_dp_mon_hal_rx_is_non_ofdma(&ppdu_info->u_sig_info)) + ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(tlv_data, ppdu_info); + else if (ath12k_dp_mon_hal_rx_is_ofdma(&ppdu_info->u_sig_info)) + ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(tlv_data, ppdu_info); +} + +static inline enum ath12k_eht_ru_size +hal_rx_mon_hal_ru_size_to_ath12k_ru_size(u32 hal_ru_size) +{ + switch (hal_ru_size) { + case HAL_EHT_RU_26: + return ATH12K_EHT_RU_26; + case HAL_EHT_RU_52: + return ATH12K_EHT_RU_52; + case HAL_EHT_RU_78: + return ATH12K_EHT_RU_52_26; + case HAL_EHT_RU_106: + return ATH12K_EHT_RU_106; + case HAL_EHT_RU_132: + return ATH12K_EHT_RU_106_26; + case HAL_EHT_RU_242: + return ATH12K_EHT_RU_242; + case HAL_EHT_RU_484: + return ATH12K_EHT_RU_484; + case HAL_EHT_RU_726: + return ATH12K_EHT_RU_484_242; + case HAL_EHT_RU_996: + return ATH12K_EHT_RU_996; + case HAL_EHT_RU_996x2: + return ATH12K_EHT_RU_996x2; + case HAL_EHT_RU_996x3: + return ATH12K_EHT_RU_996x3; + case HAL_EHT_RU_996x4: + return ATH12K_EHT_RU_996x4; + case HAL_EHT_RU_NONE: + return ATH12K_EHT_RU_INVALID; + case HAL_EHT_RU_996_484: + return ATH12K_EHT_RU_996_484; + case HAL_EHT_RU_996x2_484: + return ATH12K_EHT_RU_996x2_484; + case HAL_EHT_RU_996x3_484: + return ATH12K_EHT_RU_996x3_484; + case HAL_EHT_RU_996_484_242: + return ATH12K_EHT_RU_996_484_242; + default: + return ATH12K_EHT_RU_INVALID; + } +} + +static inline u32 
+hal_rx_ul_ofdma_ru_size_to_width(enum ath12k_eht_ru_size ru_size) +{ + switch (ru_size) { + case ATH12K_EHT_RU_26: + return RU_26; + case ATH12K_EHT_RU_52: + return RU_52; + case ATH12K_EHT_RU_52_26: + return RU_52_26; + case ATH12K_EHT_RU_106: + return RU_106; + case ATH12K_EHT_RU_106_26: + return RU_106_26; + case ATH12K_EHT_RU_242: + return RU_242; + case ATH12K_EHT_RU_484: + return RU_484; + case ATH12K_EHT_RU_484_242: + return RU_484_242; + case ATH12K_EHT_RU_996: + return RU_996; + case ATH12K_EHT_RU_996_484: + return RU_996_484; + case ATH12K_EHT_RU_996_484_242: + return RU_996_484_242; + case ATH12K_EHT_RU_996x2: + return RU_2X996; + case ATH12K_EHT_RU_996x2_484: + return RU_2X996_484; + case ATH12K_EHT_RU_996x3: + return RU_3X996; + case ATH12K_EHT_RU_996x3_484: + return RU_3X996_484; + case ATH12K_EHT_RU_996x4: + return RU_4X996; + default: + return RU_INVALID; + } +} + +static void +ath12k_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_info, + u16 user_id, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_user_status *mon_rx_user_status = NULL; + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + enum ath12k_eht_ru_size rtap_ru_size = ATH12K_EHT_RU_INVALID; + u32 ru_width, reception_type, ru_index = HAL_EHT_RU_INVALID; + u32 ru_type_80_0, ru_start_index_80_0; + u32 ru_type_80_1, ru_start_index_80_1; + u32 ru_type_80_2, ru_start_index_80_2; + u32 ru_type_80_3, ru_start_index_80_3; + u32 ru_size = 0, num_80mhz_with_ru = 0; + u64 ru_index_320mhz = 0; + u32 ru_index_per80mhz; + + reception_type = le32_get_bits(rx_usr_info->info0, + HAL_RX_USR_INFO0_RECEPTION_TYPE); + + switch (reception_type) { + case HAL_RECEPTION_TYPE_SU: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; + break; + case HAL_RECEPTION_TYPE_DL_MU_MIMO: + case HAL_RECEPTION_TYPE_UL_MU_MIMO: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; + break; + case HAL_RECEPTION_TYPE_DL_MU_OFMA: + case HAL_RECEPTION_TYPE_UL_MU_OFDMA: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; + break; + case HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO: + case HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO; + } + + ppdu_info->is_stbc = le32_get_bits(rx_usr_info->info0, HAL_RX_USR_INFO0_STBC); + ppdu_info->ldpc = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_LDPC); + ppdu_info->dcm = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_STA_DCM); + ppdu_info->bw = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_RX_BW); + ppdu_info->mcs = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_MCS); + ppdu_info->nss = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_NSS) + 1; + + if (user_id < HAL_MAX_UL_MU_USERS) { + mon_rx_user_status = &ppdu_info->userstats[user_id]; + mon_rx_user_status->mcs = ppdu_info->mcs; + mon_rx_user_status->nss = ppdu_info->nss; + } + + if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO || + ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA || + ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO)) + return; + + /* RU allocation present only for OFDMA reception */ + ru_type_80_0 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_0); + ru_start_index_80_0 = le32_get_bits(rx_usr_info->info3, + HAL_RX_USR_INFO3_RU_START_IDX_80_0); + if (ru_type_80_0 != HAL_EHT_RU_NONE) { + ru_size += ru_type_80_0; + ru_index_per80mhz = ru_start_index_80_0; + ru_index = ru_index_per80mhz; + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_0, 0, ru_index_per80mhz); 
+ num_80mhz_with_ru++; + } + + ru_type_80_1 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_1); + ru_start_index_80_1 = le32_get_bits(rx_usr_info->info3, + HAL_RX_USR_INFO3_RU_START_IDX_80_1); + if (ru_type_80_1 != HAL_EHT_RU_NONE) { + ru_size += ru_type_80_1; + ru_index_per80mhz = ru_start_index_80_1; + ru_index = ru_index_per80mhz; + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_1, 1, ru_index_per80mhz); + num_80mhz_with_ru++; + } + + ru_type_80_2 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_2); + ru_start_index_80_2 = le32_get_bits(rx_usr_info->info3, + HAL_RX_USR_INFO3_RU_START_IDX_80_2); + if (ru_type_80_2 != HAL_EHT_RU_NONE) { + ru_size += ru_type_80_2; + ru_index_per80mhz = ru_start_index_80_2; + ru_index = ru_index_per80mhz; + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_2, 2, ru_index_per80mhz); + num_80mhz_with_ru++; + } + + ru_type_80_3 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_3); + ru_start_index_80_3 = le32_get_bits(rx_usr_info->info3, + HAL_RX_USR_INFO3_RU_START_IDX_80_3); + if (ru_type_80_3 != HAL_EHT_RU_NONE) { + ru_size += ru_type_80_3; + ru_index_per80mhz = ru_start_index_80_3; + ru_index = ru_index_per80mhz; + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_3, 3, ru_index_per80mhz); + num_80mhz_with_ru++; + } + + if (num_80mhz_with_ru > 1) { + /* Calculate the MRU index */ + switch (ru_index_320mhz) { + case HAL_EHT_RU_996_484_0: + case HAL_EHT_RU_996x2_484_0: + case HAL_EHT_RU_996x3_484_0: + ru_index = 0; + break; + case HAL_EHT_RU_996_484_1: + case HAL_EHT_RU_996x2_484_1: + case HAL_EHT_RU_996x3_484_1: + ru_index = 1; + break; + case HAL_EHT_RU_996_484_2: + case HAL_EHT_RU_996x2_484_2: + case HAL_EHT_RU_996x3_484_2: + ru_index = 2; + break; + case HAL_EHT_RU_996_484_3: + case HAL_EHT_RU_996x2_484_3: + case HAL_EHT_RU_996x3_484_3: + ru_index = 3; + break; + case HAL_EHT_RU_996_484_4: + case HAL_EHT_RU_996x2_484_4: + case HAL_EHT_RU_996x3_484_4: + ru_index = 4; + break; + case HAL_EHT_RU_996_484_5: + case HAL_EHT_RU_996x2_484_5: + case HAL_EHT_RU_996x3_484_5: + ru_index = 5; + break; + case HAL_EHT_RU_996_484_6: + case HAL_EHT_RU_996x2_484_6: + case HAL_EHT_RU_996x3_484_6: + ru_index = 6; + break; + case HAL_EHT_RU_996_484_7: + case HAL_EHT_RU_996x2_484_7: + case HAL_EHT_RU_996x3_484_7: + ru_index = 7; + break; + case HAL_EHT_RU_996x2_484_8: + ru_index = 8; + break; + case HAL_EHT_RU_996x2_484_9: + ru_index = 9; + break; + case HAL_EHT_RU_996x2_484_10: + ru_index = 10; + break; + case HAL_EHT_RU_996x2_484_11: + ru_index = 11; + break; + default: + ru_index = HAL_EHT_RU_INVALID; + break; + } + + ru_size += 4; + } + + rtap_ru_size = hal_rx_mon_hal_ru_size_to_ath12k_ru_size(ru_size); + if (rtap_ru_size != ATH12K_EHT_RU_INVALID) { + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_SIZE_OM; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[1]); + data |= u32_encode_bits(rtap_ru_size, + IEEE80211_RADIOTAP_EHT_DATA1_RU_SIZE); + eht->data[1] = cpu_to_le32(data); + } + + if (ru_index != HAL_EHT_RU_INVALID) { + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_INDEX_OM; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[1]); + data |= u32_encode_bits(ru_index, + IEEE80211_RADIOTAP_EHT_DATA1_RU_INDEX); + eht->data[1] = cpu_to_le32(data); + } + + if (mon_rx_user_status && ru_index != HAL_EHT_RU_INVALID && + rtap_ru_size != ATH12K_EHT_RU_INVALID) { +
mon_rx_user_status->ul_ofdma_ru_start_index = ru_index; + mon_rx_user_status->ul_ofdma_ru_size = rtap_ru_size; + + ru_width = hal_rx_ul_ofdma_ru_size_to_width(rtap_ru_size); + + mon_rx_user_status->ul_ofdma_ru_width = ru_width; + mon_rx_user_status->ofdma_info_valid = 1; + } +} + +static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap) +{ + if (info & RX_MSDU_END_INFO13_FCS_ERR) + *errmap |= HAL_RX_MPDU_ERR_FCS; + + if (info & RX_MSDU_END_INFO13_DECRYPT_ERR) + *errmap |= HAL_RX_MPDU_ERR_DECRYPT; + + if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR) + *errmap |= HAL_RX_MPDU_ERR_TKIP_MIC; + + if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR) + *errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR; + + if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR) + *errmap |= HAL_RX_MPDU_ERR_OVERFLOW; + + if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR) + *errmap |= HAL_RX_MPDU_ERR_MSDU_LEN; + + if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR) + *errmap |= HAL_RX_MPDU_ERR_MPDU_LEN; +} + +static void +ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon, + const struct hal_rx_msdu_end *msdu_end) +{ + ath12k_dp_mon_parse_rx_msdu_end_err(__le32_to_cpu(msdu_end->info2), + &pmon->err_bitmap); + pmon->decap_format = le32_get_bits(msdu_end->info1, + RX_MSDU_END_INFO11_DECAP_FORMAT); } static enum hal_rx_mon_status -ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, +ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar, struct ath12k_mon_data *pmon, - u32 tlv_tag, const void *tlv_data, - u32 userid) + const struct hal_tlv_64_hdr *tlv) { struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; - u32 info[7]; + const void *tlv_data = tlv->value; + u32 info[7], userid; + u16 tlv_tag, tlv_len; + + tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG); + tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); + userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID); + + if (ppdu_info->tlv_aggr.in_progress && ppdu_info->tlv_aggr.tlv_tag != tlv_tag) { + ath12k_dp_mon_parse_eht_sig_hdr(ppdu_info, ppdu_info->tlv_aggr.buf); + + ppdu_info->tlv_aggr.in_progress = false; + ppdu_info->tlv_aggr.cur_len = 0; + } switch (tlv_tag) { case HAL_RX_PPDU_START: { @@ -638,6 +1538,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, ppdu_info->num_mpdu_fcs_err = u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR); + ppdu_info->peer_id = + u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID); + switch (ppdu_info->preamble_type) { case HAL_RX_PREAMBLE_11N: ppdu_info->ht_flags = 1; @@ -648,6 +1551,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, case HAL_RX_PREAMBLE_11AX: ppdu_info->he_flags = 1; break; + case HAL_RX_PREAMBLE_11BE: + ppdu_info->is_eht = true; + break; default: break; } @@ -655,6 +1561,11 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, if (userid < HAL_MAX_UL_MU_USERS) { struct hal_rx_user_status *rxuser_stats = &ppdu_info->userstats[userid]; + + if (ppdu_info->num_mpdu_fcs_ok > 1 || + ppdu_info->num_mpdu_fcs_err > 1) + ppdu_info->userstats[userid].ampdu_present = true; + ppdu_info->num_users += 1; ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats); @@ -730,6 +1641,17 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW); break; } + case HAL_PHYRX_OTHER_RECEIVE_INFO: { + const struct hal_phyrx_common_user_info *cmn_usr_info = tlv_data; + + ppdu_info->gi = le32_get_bits(cmn_usr_info->info0, + HAL_RX_PHY_CMN_USER_INFO0_GI); + break; + } + case HAL_RX_PPDU_START_USER_INFO: + 
ath12k_dp_mon_hal_rx_parse_user_info(tlv_data, userid, ppdu_info); + break; + case HAL_RXPCU_PPDU_END_INFO: { const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data; @@ -743,7 +1665,6 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, } case HAL_RX_MPDU_START: { const struct hal_rx_mpdu_start *mpdu_start = tlv_data; - struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; u16 peer_id; info[1] = __le32_to_cpu(mpdu_start->info1); @@ -756,68 +1677,39 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, if (userid < HAL_MAX_UL_MU_USERS) { info[0] = __le32_to_cpu(mpdu_start->info0); ppdu_info->userid = userid; - ppdu_info->ampdu_id[userid] = - u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID); + ppdu_info->userstats[userid].ampdu_id = + u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID); } - mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC); - if (!mon_mpdu) - return HAL_RX_MON_STATUS_PPDU_NOT_DONE; - - break; + return HAL_RX_MON_STATUS_MPDU_START; } case HAL_RX_MSDU_START: /* TODO: add msdu start parsing logic */ break; - case HAL_MON_BUF_ADDR: { - struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring; - const struct dp_mon_packet_info *packet_info = tlv_data; - int buf_id = u32_get_bits(packet_info->cookie, - DP_RXDMA_BUF_COOKIE_BUF_ID); - struct sk_buff *msdu; - struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; - struct ath12k_skb_rxcb *rxcb; - - spin_lock_bh(&buf_ring->idr_lock); - msdu = idr_remove(&buf_ring->bufs_idr, buf_id); - spin_unlock_bh(&buf_ring->idr_lock); - - if (unlikely(!msdu)) { - ath12k_warn(ab, "monitor destination with invalid buf_id %d\n", - buf_id); - return HAL_RX_MON_STATUS_PPDU_NOT_DONE; + case HAL_MON_BUF_ADDR: + return HAL_RX_MON_STATUS_BUF_ADDR; + case HAL_RX_MSDU_END: + ath12k_dp_mon_parse_status_msdu_end(pmon, tlv_data); + return HAL_RX_MON_STATUS_MSDU_END; + case HAL_RX_MPDU_END: + return HAL_RX_MON_STATUS_MPDU_END; + case HAL_PHYRX_GENERIC_U_SIG: + ath12k_dp_mon_hal_rx_parse_u_sig_hdr(tlv_data, ppdu_info); + break; + case HAL_PHYRX_GENERIC_EHT_SIG: + /* Handle the case where aggregation is in progress + * or the current TLV is one of the TLVs which should be + * aggregated + */ + if (!ppdu_info->tlv_aggr.in_progress) { + ppdu_info->tlv_aggr.in_progress = true; + ppdu_info->tlv_aggr.tlv_tag = tlv_tag; + ppdu_info->tlv_aggr.cur_len = 0; } - rxcb = ATH12K_SKB_RXCB(msdu); - dma_unmap_single(ab->dev, rxcb->paddr, - msdu->len + skb_tailroom(msdu), - DMA_FROM_DEVICE); - - if (mon_mpdu->tail) - mon_mpdu->tail->next = msdu; - else - mon_mpdu->tail = msdu; - - ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); + ppdu_info->is_eht = true; - break; - } - case HAL_RX_MSDU_END: { - const struct rx_msdu_end_qcn9274 *msdu_end = tlv_data; - bool is_first_msdu_in_mpdu; - u16 msdu_end_info; - - msdu_end_info = __le16_to_cpu(msdu_end->info5); - is_first_msdu_in_mpdu = u32_get_bits(msdu_end_info, - RX_MSDU_END_INFO5_FIRST_MSDU); - if (is_first_msdu_in_mpdu) { - pmon->mon_mpdu->head = pmon->mon_mpdu->tail; - pmon->mon_mpdu->tail = NULL; - } - break; - } - case HAL_RX_MPDU_END: - list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list); + ath12k_dp_mon_hal_aggr_tlv(ppdu_info, tlv_len, tlv_data); break; case HAL_DUMMY: return HAL_RX_MON_STATUS_BUF_DONE; @@ -831,45 +1723,295 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, return HAL_RX_MON_STATUS_PPDU_NOT_DONE; } +static void +ath12k_dp_mon_fill_rx_stats_info(struct ath12k *ar, + struct hal_rx_mon_ppdu_info *ppdu_info, + struct ieee80211_rx_status *rx_status) +{ + u32 center_freq = 
ppdu_info->freq; + + rx_status->freq = center_freq; + rx_status->bw = ath12k_mac_bw_to_mac80211_bw(ppdu_info->bw); + rx_status->nss = ppdu_info->nss; + rx_status->rate_idx = 0; + rx_status->encoding = RX_ENC_LEGACY; + rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; + + if (center_freq >= ATH12K_MIN_6GHZ_FREQ && + center_freq <= ATH12K_MAX_6GHZ_FREQ) { + rx_status->band = NL80211_BAND_6GHZ; + } else if (center_freq >= ATH12K_MIN_2GHZ_FREQ && + center_freq <= ATH12K_MAX_2GHZ_FREQ) { + rx_status->band = NL80211_BAND_2GHZ; + } else if (center_freq >= ATH12K_MIN_5GHZ_FREQ && + center_freq <= ATH12K_MAX_5GHZ_FREQ) { + rx_status->band = NL80211_BAND_5GHZ; + } else { + rx_status->band = NUM_NL80211_BANDS; + } +} + +static struct sk_buff +*ath12k_dp_rx_alloc_mon_status_buf(struct ath12k_base *ab, + struct dp_rxdma_mon_ring *rx_ring, + int *buf_id) +{ + struct sk_buff *skb; + dma_addr_t paddr; + + skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE); + + if (!skb) + goto fail_alloc_skb; + + if (!IS_ALIGNED((unsigned long)skb->data, + RX_MON_STATUS_BUF_ALIGN)) { + skb_pull(skb, PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) - + skb->data); + } + + paddr = dma_map_single(ab->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ab->dev, paddr))) + goto fail_free_skb; + + spin_lock_bh(&rx_ring->idr_lock); + *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, + rx_ring->bufs_max, GFP_ATOMIC); + spin_unlock_bh(&rx_ring->idr_lock); + if (*buf_id < 0) + goto fail_dma_unmap; + + ATH12K_SKB_RXCB(skb)->paddr = paddr; + return skb; + +fail_dma_unmap: + dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); +fail_free_skb: + dev_kfree_skb_any(skb); +fail_alloc_skb: + return NULL; +} + +static enum dp_mon_status_buf_state +ath12k_dp_rx_mon_buf_done(struct ath12k_base *ab, struct hal_srng *srng, + struct dp_rxdma_mon_ring *rx_ring) +{ + struct ath12k_skb_rxcb *rxcb; + struct hal_tlv_64_hdr *tlv; + struct sk_buff *skb; + void *status_desc; + dma_addr_t paddr; + u32 cookie; + int buf_id; + u8 rbm; + + status_desc = ath12k_hal_srng_src_next_peek(ab, srng); + if (!status_desc) + return DP_MON_STATUS_NO_DMA; + + ath12k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm); + + buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); + + spin_lock_bh(&rx_ring->idr_lock); + skb = idr_find(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + if (!skb) + return DP_MON_STATUS_NO_DMA; + + rxcb = ATH12K_SKB_RXCB(skb); + dma_sync_single_for_cpu(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + tlv = (struct hal_tlv_64_hdr *)skb->data; + if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) != HAL_RX_STATUS_BUFFER_DONE) + return DP_MON_STATUS_NO_DMA; + + return DP_MON_STATUS_REPLINISH; +} + +static u32 ath12k_dp_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id) +{ + u32 ret = 0; + + if ((*ppdu_id < msdu_ppdu_id) && + ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { + /* Hold on mon dest ring, and reap mon status ring. */ + *ppdu_id = msdu_ppdu_id; + ret = msdu_ppdu_id; + } else if ((*ppdu_id > msdu_ppdu_id) && + ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { + /* PPDU ID has exceeded the maximum value and will + * restart from 0. 
+ */ + *ppdu_id = msdu_ppdu_id; + ret = msdu_ppdu_id; + } + return ret; +} + +static +void ath12k_dp_mon_next_link_desc_get(struct hal_rx_msdu_link *msdu_link, + dma_addr_t *paddr, u32 *sw_cookie, u8 *rbm, + struct ath12k_buffer_addr **pp_buf_addr_info) +{ + struct ath12k_buffer_addr *buf_addr_info; + + buf_addr_info = &msdu_link->buf_addr_info; + + ath12k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); + + *pp_buf_addr_info = buf_addr_info; +} + +static void +ath12k_dp_mon_fill_rx_rate(struct ath12k *ar, + struct hal_rx_mon_ppdu_info *ppdu_info, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_supported_band *sband; + enum rx_msdu_start_pkt_type pkt_type; + u8 rate_mcs, nss, sgi; + bool is_cck; + + pkt_type = ppdu_info->preamble_type; + rate_mcs = ppdu_info->rate; + nss = ppdu_info->nss; + sgi = ppdu_info->gi; + + switch (pkt_type) { + case RX_MSDU_START_PKT_TYPE_11A: + case RX_MSDU_START_PKT_TYPE_11B: + is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); + if (rx_status->band < NUM_NL80211_BANDS) { + sband = &ar->mac.sbands[rx_status->band]; + rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs, + is_cck); + } + break; + case RX_MSDU_START_PKT_TYPE_11N: + rx_status->encoding = RX_ENC_HT; + if (rate_mcs > ATH12K_HT_MCS_MAX) { + ath12k_warn(ar->ab, + "Received with invalid mcs in HT mode %d\n", + rate_mcs); + break; + } + rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); + if (sgi) + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + break; + case RX_MSDU_START_PKT_TYPE_11AC: + rx_status->encoding = RX_ENC_VHT; + rx_status->rate_idx = rate_mcs; + if (rate_mcs > ATH12K_VHT_MCS_MAX) { + ath12k_warn(ar->ab, + "Received with invalid mcs in VHT mode %d\n", + rate_mcs); + break; + } + if (sgi) + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + break; + case RX_MSDU_START_PKT_TYPE_11AX: + rx_status->rate_idx = rate_mcs; + if (rate_mcs > ATH12K_HE_MCS_MAX) { + ath12k_warn(ar->ab, + "Received with invalid mcs in HE mode %d\n", + rate_mcs); + break; + } + rx_status->encoding = RX_ENC_HE; + rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); + break; + case RX_MSDU_START_PKT_TYPE_11BE: + rx_status->rate_idx = rate_mcs; + if (rate_mcs > ATH12K_EHT_MCS_MAX) { + ath12k_warn(ar->ab, + "Received with invalid mcs in EHT mode %d\n", + rate_mcs); + break; + } + rx_status->encoding = RX_ENC_EHT; + rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); + break; + default: + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "monitor receives invalid preamble type %d", + pkt_type); + break; + } +} + static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff *head_msdu, struct sk_buff *tail_msdu) { - u32 rx_pkt_offset, l2_hdr_offset; + u32 rx_pkt_offset, l2_hdr_offset, total_offset; rx_pkt_offset = ar->ab->hal.hal_desc_sz; l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, (struct hal_rx_desc *)tail_msdu->data); - skb_pull(head_msdu, rx_pkt_offset + l2_hdr_offset); + + if (ar->ab->hw_params->rxdma1_enable) + total_offset = ATH12K_MON_RX_PKT_OFFSET; + else + total_offset = rx_pkt_offset + l2_hdr_offset; + + skb_pull(head_msdu, total_offset); } static struct sk_buff * -ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id, - struct sk_buff *head_msdu, struct sk_buff *tail_msdu, - struct ieee80211_rx_status *rxs, bool *fcs_err) +ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, + struct dp_mon_mpdu *mon_mpdu, + struct hal_rx_mon_ppdu_info *ppdu_info, + struct ieee80211_rx_status *rxs) { struct ath12k_base *ab = ar->ab; struct sk_buff *msdu, *mpdu_buf, *prev_buf, 
*head_frag_list; - struct hal_rx_desc *rx_desc, *tail_rx_desc; - u8 *hdr_desc, *dest, decap_format; + struct sk_buff *head_msdu, *tail_msdu; + struct hal_rx_desc *rx_desc; + u8 *hdr_desc, *dest, decap_format = mon_mpdu->decap_format; struct ieee80211_hdr_3addr *wh; - u32 err_bitmap, frag_list_sum_len = 0; + struct ieee80211_channel *channel; + u32 frag_list_sum_len = 0; + u8 channel_num = ppdu_info->chan_num; mpdu_buf = NULL; + head_msdu = mon_mpdu->head; + tail_msdu = mon_mpdu->tail; - if (!head_msdu) + if (!head_msdu || !tail_msdu) goto err_merge_fail; - rx_desc = (struct hal_rx_desc *)head_msdu->data; - tail_rx_desc = (struct hal_rx_desc *)tail_msdu->data; + ath12k_dp_mon_fill_rx_stats_info(ar, ppdu_info, rxs); - err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, tail_rx_desc); - if (err_bitmap & HAL_RX_MPDU_ERR_FCS) - *fcs_err = true; + if (unlikely(rxs->band == NUM_NL80211_BANDS || + !ath12k_ar_to_hw(ar)->wiphy->bands[rxs->band])) { + ath12k_dbg(ar->ab, ATH12K_DBG_DATA, + "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n", + rxs->band, channel_num, ppdu_info->freq, ar->pdev_idx); - decap_format = ath12k_dp_rx_h_decap_type(ab, tail_rx_desc); + spin_lock_bh(&ar->data_lock); + channel = ar->rx_channel; + if (channel) { + rxs->band = channel->band; + channel_num = + ieee80211_frequency_to_channel(channel->center_freq); + } + spin_unlock_bh(&ar->data_lock); + } - ath12k_dp_rx_h_ppdu(ar, tail_rx_desc, rxs); + if (rxs->band < NUM_NL80211_BANDS) + rxs->freq = ieee80211_channel_to_frequency(channel_num, + rxs->band); + + ath12k_dp_mon_fill_rx_rate(ar, ppdu_info, rxs); if (decap_format == DP_RX_DECAP_TYPE_RAW) { ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu); @@ -879,7 +2021,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id, head_frag_list = NULL; while (msdu) { - ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu); + ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu); if (!head_frag_list) head_frag_list = msdu; @@ -891,7 +2033,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id, prev_buf->next = NULL; - skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); + skb_trim(prev_buf, prev_buf->len); if (head_frag_list) { skb_shinfo(head_msdu)->frag_list = head_frag_list; head_msdu->data_len = frag_list_sum_len; @@ -914,7 +2056,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id, msdu = head_msdu; while (msdu) { - ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu); + ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu); if (qos_pkt) { dest = skb_push(msdu, sizeof(__le16)); if (!dest) @@ -1005,18 +2147,70 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar, { struct ieee80211_supported_band *sband; u8 *ptr = NULL; - u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid]; rxs->flag |= RX_FLAG_MACTIME_START; rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR; rxs->nss = ppduinfo->nss + 1; - if (ampdu_id) { + if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) { rxs->flag |= RX_FLAG_AMPDU_DETAILS; - rxs->ampdu_reference = ampdu_id; + rxs->ampdu_reference = ppduinfo->userstats[ppduinfo->userid].ampdu_id; } - if (ppduinfo->he_mu_flags) { + if (ppduinfo->is_eht || ppduinfo->eht_usig) { + struct ieee80211_radiotap_tlv *tlv; + struct ieee80211_radiotap_eht *eht; + struct ieee80211_radiotap_eht_usig *usig; + u16 len = 0, i, eht_len, usig_len; + u8 user; + + if (ppduinfo->is_eht) { + eht_len = struct_size(eht, + user_info, + ppduinfo->eht_info.num_user_info); + len += sizeof(*tlv) + eht_len; + } + 
+ if (ppduinfo->eht_usig) { + usig_len = sizeof(*usig); + len += sizeof(*tlv) + usig_len; + } + + rxs->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; + rxs->encoding = RX_ENC_EHT; + + skb_reset_mac_header(mon_skb); + + tlv = skb_push(mon_skb, len); + + if (ppduinfo->is_eht) { + tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT); + tlv->len = cpu_to_le16(eht_len); + + eht = (struct ieee80211_radiotap_eht *)tlv->data; + eht->known = ppduinfo->eht_info.eht.known; + + for (i = 0; + i < ARRAY_SIZE(eht->data) && + i < ARRAY_SIZE(ppduinfo->eht_info.eht.data); + i++) + eht->data[i] = ppduinfo->eht_info.eht.data[i]; + + for (user = 0; user < ppduinfo->eht_info.num_user_info; user++) + put_unaligned_le32(ppduinfo->eht_info.user_info[user], + &eht->user_info[user]); + + tlv = (struct ieee80211_radiotap_tlv *)&tlv->data[eht_len]; + } + + if (ppduinfo->eht_usig) { + tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT_USIG); + tlv->len = cpu_to_le16(usig_len); + + usig = (struct ieee80211_radiotap_eht_usig *)tlv->data; + *usig = ppduinfo->u_sig_info.usig; + } + } else if (ppduinfo->he_mu_flags) { rxs->flag |= RX_FLAG_RADIOTAP_HE_MU; rxs->encoding = RX_ENC_HE; ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu)); @@ -1045,7 +2239,8 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar, static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi, struct sk_buff *msdu, - struct ieee80211_rx_status *status) + struct ieee80211_rx_status *status, + u8 decap) { static const struct ieee80211_radiotap_he known = { .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | @@ -1057,10 +2252,12 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct struct ieee80211_sta *pubsta = NULL; struct ath12k_peer *peer; struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu); - u8 decap = DP_RX_DECAP_TYPE_RAW; + struct ath12k_dp_rx_info rx_info; bool is_mcbc = rxcb->is_mcbc; bool is_eapol_tkip = rxcb->is_eapol; + status->link_valid = 0; + if ((status->encoding == RX_ENC_HE) && !(status->flag & RX_FLAG_RADIOTAP_HE) && !(status->flag & RX_FLAG_SKIP_MONITOR)) { he = skb_push(msdu, sizeof(known)); @@ -1068,10 +2265,9 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct status->flag |= RX_FLAG_RADIOTAP_HE; } - if (!(status->flag & RX_FLAG_ONLY_MONITOR)) - decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc); spin_lock_bh(&ar->ab->base_lock); - peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu); + rx_info.addr2_present = false; + peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, &rx_info); if (peer && peer->sta) { pubsta = peer->sta; if (pubsta->valid_links) { @@ -1125,26 +2321,24 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); } -static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id, - struct sk_buff *head_msdu, struct sk_buff *tail_msdu, +static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, + struct dp_mon_mpdu *mon_mpdu, struct hal_rx_mon_ppdu_info *ppduinfo, struct napi_struct *napi) { struct ath12k_pdev_dp *dp = &ar->dp; struct sk_buff *mon_skb, *skb_next, *header; struct ieee80211_rx_status *rxs = &dp->rx_status; - bool fcs_err = false; + u8 decap = DP_RX_DECAP_TYPE_RAW; - mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, - head_msdu, tail_msdu, - rxs, &fcs_err); + mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mon_mpdu, ppduinfo, rxs); if (!mon_skb) goto mon_deliver_fail; header = mon_skb; rxs->flag = 0; - if (fcs_err) + if 
(mon_mpdu->err_bitmap & HAL_RX_MPDU_ERR_FCS) rxs->flag = RX_FLAG_FAILED_FCS_CRC; do { @@ -1161,8 +2355,12 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id, rxs->flag |= RX_FLAG_ALLOW_SAME_PN; } rxs->flag |= RX_FLAG_ONLY_MONITOR; + + if (!(rxs->flag & RX_FLAG_ONLY_MONITOR)) + decap = mon_mpdu->decap_format; + ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs); - ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs); + ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs, decap); mon_skb = skb_next; } while (mon_skb); rxs->flag = 0; @@ -1170,7 +2368,7 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id, return 0; mon_deliver_fail: - mon_skb = head_msdu; + mon_skb = mon_mpdu->head; while (mon_skb) { skb_next = mon_skb->next; dev_kfree_skb_any(mon_skb); @@ -1179,25 +2377,146 @@ mon_deliver_fail: return -EINVAL; } +static int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) +{ + if (skb->len > len) { + skb_trim(skb, len); + } else { + if (skb_tailroom(skb) < len - skb->len) { + if ((pskb_expand_head(skb, 0, + len - skb->len - skb_tailroom(skb), + GFP_ATOMIC))) { + return -ENOMEM; + } + } + skb_put(skb, (len - skb->len)); + } + + return 0; +} + +/* Hardware fill buffer with 128 bytes aligned. So need to reap it + * with 128 bytes aligned. + */ +#define RXDMA_DATA_DMA_BLOCK_SIZE 128 + +static void +ath12k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, + bool *is_frag, u32 *total_len, + u32 *frag_len, u32 *msdu_cnt) +{ + if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { + *is_frag = true; + *frag_len = (RX_MON_STATUS_BASE_BUF_SIZE - + sizeof(struct hal_rx_desc)) & + ~(RXDMA_DATA_DMA_BLOCK_SIZE - 1); + *total_len += *frag_len; + } else { + if (*is_frag) + *frag_len = info->msdu_len - *total_len; + else + *frag_len = info->msdu_len; + + *msdu_cnt -= 1; + } +} + +static int +ath12k_dp_mon_parse_status_buf(struct ath12k *ar, + struct ath12k_mon_data *pmon, + const struct dp_mon_packet_info *packet_info) +{ + struct ath12k_base *ab = ar->ab; + struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring; + struct sk_buff *msdu; + int buf_id; + u32 offset; + + buf_id = u32_get_bits(packet_info->cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); + + spin_lock_bh(&buf_ring->idr_lock); + msdu = idr_remove(&buf_ring->bufs_idr, buf_id); + spin_unlock_bh(&buf_ring->idr_lock); + + if (unlikely(!msdu)) { + ath12k_warn(ab, "mon dest desc with inval buf_id %d\n", buf_id); + return 0; + } + + dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(msdu)->paddr, + msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + + offset = packet_info->dma_length + ATH12K_MON_RX_DOT11_OFFSET; + if (ath12k_dp_pkt_set_pktlen(msdu, offset)) { + dev_kfree_skb_any(msdu); + goto dest_replenish; + } + + if (!pmon->mon_mpdu->head) + pmon->mon_mpdu->head = msdu; + else + pmon->mon_mpdu->tail->next = msdu; + + pmon->mon_mpdu->tail = msdu; + +dest_replenish: + ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); + + return 0; +} + +static int +ath12k_dp_mon_parse_rx_dest_tlv(struct ath12k *ar, + struct ath12k_mon_data *pmon, + enum hal_rx_mon_status hal_status, + const void *tlv_data) +{ + switch (hal_status) { + case HAL_RX_MON_STATUS_MPDU_START: + if (WARN_ON_ONCE(pmon->mon_mpdu)) + break; + + pmon->mon_mpdu = kzalloc(sizeof(*pmon->mon_mpdu), GFP_ATOMIC); + if (!pmon->mon_mpdu) + return -ENOMEM; + break; + case HAL_RX_MON_STATUS_BUF_ADDR: + return ath12k_dp_mon_parse_status_buf(ar, pmon, tlv_data); + case HAL_RX_MON_STATUS_MPDU_END: + /* If no MSDU then free empty MPDU */ + if 
(pmon->mon_mpdu->tail) { + pmon->mon_mpdu->tail->next = NULL; + list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list); + } else { + kfree(pmon->mon_mpdu); + } + pmon->mon_mpdu = NULL; + break; + case HAL_RX_MON_STATUS_MSDU_END: + pmon->mon_mpdu->decap_format = pmon->decap_format; + pmon->mon_mpdu->err_bitmap = pmon->err_bitmap; + break; + default: + break; + } + + return 0; +} + static enum hal_rx_mon_status -ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon, +ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon, struct sk_buff *skb) { - struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; struct hal_tlv_64_hdr *tlv; + struct ath12k_skb_rxcb *rxcb; enum hal_rx_mon_status hal_status; - u32 tlv_userid; u16 tlv_tag, tlv_len; u8 *ptr = skb->data; - memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info)); - do { tlv = (struct hal_tlv_64_hdr *)ptr; tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG); - tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); - tlv_userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID); - ptr += sizeof(*tlv); /* The actual length of PPDU_END is the combined length of many PHY * TLVs that follow. Skip the TLV header and @@ -1207,16 +2526,30 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon if (tlv_tag == HAL_RX_PPDU_END) tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview); + else + tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); - hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon, - tlv_tag, ptr, tlv_userid); - ptr += tlv_len; + hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv); + + if (ar->monitor_started && ar->ab->hw_params->rxdma1_enable && + ath12k_dp_mon_parse_rx_dest_tlv(ar, pmon, hal_status, tlv->value)) + return HAL_RX_MON_STATUS_PPDU_DONE; + + ptr += sizeof(*tlv) + tlv_len; ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN); - if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE) + if ((ptr - skb->data) > skb->len) break; - } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE); + } while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) || + (hal_status == HAL_RX_MON_STATUS_BUF_ADDR) || + (hal_status == HAL_RX_MON_STATUS_MPDU_START) || + (hal_status == HAL_RX_MON_STATUS_MPDU_END) || + (hal_status == HAL_RX_MON_STATUS_MSDU_END)); + + rxcb = ATH12K_SKB_RXCB(skb); + if (rxcb->is_end_of_ppdu) + hal_status = HAL_RX_MON_STATUS_PPDU_DONE; return hal_status; } @@ -1224,31 +2557,27 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon enum hal_rx_mon_status ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, struct ath12k_mon_data *pmon, - int mac_id, struct sk_buff *skb, struct napi_struct *napi) { - struct ath12k_base *ab = ar->ab; struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; struct dp_mon_mpdu *tmp; struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; - struct sk_buff *head_msdu, *tail_msdu; - enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE; + enum hal_rx_mon_status hal_status; - ath12k_dp_mon_parse_rx_dest(ab, pmon, skb); + hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb); + if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) + return hal_status; list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) { list_del(&mon_mpdu->list); - head_msdu = mon_mpdu->head; - tail_msdu = mon_mpdu->tail; - if (head_msdu && tail_msdu) { - ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, - tail_msdu, ppdu_info, napi); - } + if (mon_mpdu->head && mon_mpdu->tail) + 
+static int
+ath12k_dp_mon_parse_rx_dest_tlv(struct ath12k *ar,
+				struct ath12k_mon_data *pmon,
+				enum hal_rx_mon_status hal_status,
+				const void *tlv_data)
+{
+	switch (hal_status) {
+	case HAL_RX_MON_STATUS_MPDU_START:
+		if (WARN_ON_ONCE(pmon->mon_mpdu))
+			break;
+
+		pmon->mon_mpdu = kzalloc(sizeof(*pmon->mon_mpdu), GFP_ATOMIC);
+		if (!pmon->mon_mpdu)
+			return -ENOMEM;
+		break;
+	case HAL_RX_MON_STATUS_BUF_ADDR:
+		return ath12k_dp_mon_parse_status_buf(ar, pmon, tlv_data);
+	case HAL_RX_MON_STATUS_MPDU_END:
+		/* If there is no MSDU, free the empty MPDU */
+		if (pmon->mon_mpdu->tail) {
+			pmon->mon_mpdu->tail->next = NULL;
+			list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
+		} else {
+			kfree(pmon->mon_mpdu);
+		}
+		pmon->mon_mpdu = NULL;
+		break;
+	case HAL_RX_MON_STATUS_MSDU_END:
+		pmon->mon_mpdu->decap_format = pmon->decap_format;
+		pmon->mon_mpdu->err_bitmap = pmon->err_bitmap;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 static enum hal_rx_mon_status
-ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon,
+ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon,
			    struct sk_buff *skb)
 {
-	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
	struct hal_tlv_64_hdr *tlv;
+	struct ath12k_skb_rxcb *rxcb;
	enum hal_rx_mon_status hal_status;
-	u32 tlv_userid;
	u16 tlv_tag, tlv_len;
	u8 *ptr = skb->data;

-	memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
-
	do {
		tlv = (struct hal_tlv_64_hdr *)ptr;
		tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
-		tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
-		tlv_userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID);
-		ptr += sizeof(*tlv);

		/* The actual length of PPDU_END is the combined length of many PHY
		 * TLVs that follow. Skip the TLV header and
@@ -1207,16 +2526,30 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon
		if (tlv_tag == HAL_RX_PPDU_END)
			tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
+		else
+			tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);

-		hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon,
-							       tlv_tag, ptr, tlv_userid);
-		ptr += tlv_len;
+		hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv);
+
+		if (ar->monitor_started && ar->ab->hw_params->rxdma1_enable &&
+		    ath12k_dp_mon_parse_rx_dest_tlv(ar, pmon, hal_status, tlv->value))
+			return HAL_RX_MON_STATUS_PPDU_DONE;
+
+		ptr += sizeof(*tlv) + tlv_len;
		ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);

-		if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+		if ((ptr - skb->data) > skb->len)
			break;

-	} while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
+	} while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) ||
+		 (hal_status == HAL_RX_MON_STATUS_BUF_ADDR) ||
+		 (hal_status == HAL_RX_MON_STATUS_MPDU_START) ||
+		 (hal_status == HAL_RX_MON_STATUS_MPDU_END) ||
+		 (hal_status == HAL_RX_MON_STATUS_MSDU_END));
+
+	rxcb = ATH12K_SKB_RXCB(skb);
+	if (rxcb->is_end_of_ppdu)
+		hal_status = HAL_RX_MON_STATUS_PPDU_DONE;

	return hal_status;
 }
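The loop above walks 64-bit TLV headers: read the tag and length from the header word, advance past header plus payload, then realign. A self-contained sketch of such a walk; the bit layout and macro names here are hypothetical stand-ins for the driver's HAL_TLV_64_* definitions:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical field layout: tag in bits 1..9, length in bits 10..31,
 * 8-byte alignment between TLVs. */
#define TLV_TAG(tl)	(((tl) >> 1) & 0x1ff)
#define TLV_LEN(tl)	(((tl) >> 10) & 0x3fffff)
#define TLV_ALIGN	8

static void walk_tlvs(const uint8_t *buf, size_t buf_len,
		      void (*cb)(uint16_t tag, const uint8_t *val, uint32_t len))
{
	size_t off = 0;

	while (off + sizeof(uint64_t) <= buf_len) {
		uint64_t tl;

		memcpy(&tl, buf + off, sizeof(tl));	/* little-endian host assumed */
		uint16_t tag = TLV_TAG(tl);
		uint32_t len = TLV_LEN(tl);

		if (off + sizeof(tl) + len > buf_len)
			break;
		cb(tag, buf + off + sizeof(tl), len);
		off += sizeof(tl) + len;
		off = (off + TLV_ALIGN - 1) & ~(size_t)(TLV_ALIGN - 1);
	}
}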
@@ -1224,31 +2557,27 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon
 enum hal_rx_mon_status
 ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
				  struct ath12k_mon_data *pmon,
-				  int mac_id,
				  struct sk_buff *skb,
				  struct napi_struct *napi)
 {
-	struct ath12k_base *ab = ar->ab;
	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
	struct dp_mon_mpdu *tmp;
	struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
-	struct sk_buff *head_msdu, *tail_msdu;
-	enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+	enum hal_rx_mon_status hal_status;

-	ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+	hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+	if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE)
+		return hal_status;

	list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
		list_del(&mon_mpdu->list);
-		head_msdu = mon_mpdu->head;
-		tail_msdu = mon_mpdu->tail;

-		if (head_msdu && tail_msdu) {
-			ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
-						 tail_msdu, ppdu_info, napi);
-		}
+		if (mon_mpdu->head && mon_mpdu->tail)
+			ath12k_dp_mon_rx_deliver(ar, mon_mpdu, ppdu_info, napi);

		kfree(mon_mpdu);
	}
+
	return hal_status;
 }

@@ -1327,6 +2656,94 @@ fail_alloc_skb:
	return -ENOMEM;
 }

+int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
+					struct dp_rxdma_mon_ring *rx_ring,
+					int req_entries)
+{
+	enum hal_rx_buf_return_buf_manager mgr =
+		ab->hw_params->hal_params->rx_buf_rbm;
+	int num_free, num_remain, buf_id;
+	struct ath12k_buffer_addr *desc;
+	struct hal_srng *srng;
+	struct sk_buff *skb;
+	dma_addr_t paddr;
+	u32 cookie;
+
+	req_entries = min(req_entries, rx_ring->bufs_max);
+
+	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+	spin_lock_bh(&srng->lock);
+
+	ath12k_hal_srng_access_begin(ab, srng);
+
+	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
+	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+		req_entries = num_free;
+
+	req_entries = min(num_free, req_entries);
+	num_remain = req_entries;
+
+	while (num_remain > 0) {
+		skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE);
+		if (!skb)
+			break;
+
+		if (!IS_ALIGNED((unsigned long)skb->data,
+				RX_MON_STATUS_BUF_ALIGN)) {
+			skb_pull(skb,
+				 PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) -
+				 skb->data);
+		}
+
+		paddr = dma_map_single(ab->dev, skb->data,
+				       skb->len + skb_tailroom(skb),
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(ab->dev, paddr))
+			goto fail_free_skb;
+
+		spin_lock_bh(&rx_ring->idr_lock);
+		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+				   rx_ring->bufs_max * 3, GFP_ATOMIC);
+		spin_unlock_bh(&rx_ring->idr_lock);
+		if (buf_id < 0)
+			goto fail_dma_unmap;
+		cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+		if (!desc)
+			goto fail_buf_unassign;
+
+		ATH12K_SKB_RXCB(skb)->paddr = paddr;
+
+		num_remain--;
+
+		ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+	}
+
+	ath12k_hal_srng_access_end(ab, srng);
+
+	spin_unlock_bh(&srng->lock);
+
+	return req_entries - num_remain;
+
+fail_buf_unassign:
+	spin_lock_bh(&rx_ring->idr_lock);
+	idr_remove(&rx_ring->bufs_idr, buf_id);
+	spin_unlock_bh(&rx_ring->idr_lock);
+fail_dma_unmap:
+	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+			 DMA_FROM_DEVICE);
+fail_free_skb:
+	dev_kfree_skb_any(skb);
+
+	ath12k_hal_srng_access_end(ab, srng);
+
+	spin_unlock_bh(&srng->lock);
+
+	return req_entries - num_remain;
+}
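The replenish path above pairs each skb with an IDR id and packs that id into the ring cookie, which the reap path later unpacks with u32_get_bits(). A standalone sketch of that cookie packing; the field widths here are hypothetical, the real ones come from the DP_RXDMA_BUF_COOKIE_* masks:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical split: low 18 bits buffer id, next bits pdev id. */
#define COOKIE_BUF_ID_MASK	0x3ffff
#define COOKIE_PDEV_SHIFT	18

static uint32_t cookie_pack(uint32_t pdev_id, uint32_t buf_id)
{
	return (pdev_id << COOKIE_PDEV_SHIFT) | (buf_id & COOKIE_BUF_ID_MASK);
}

int main(void)
{
	uint32_t cookie = cookie_pack(1, 4242);

	printf("pdev %u buf_id %u\n",
	       cookie >> COOKIE_PDEV_SHIFT, cookie & COOKIE_BUF_ID_MASK);
	return 0;
}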
 static struct dp_mon_tx_ppdu_info *
 ath12k_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon,
			       unsigned int ppdu_id,
@@ -1924,21 +3341,18 @@ ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
 }

 static void
-ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
+ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar,
				   struct napi_struct *napi,
				   struct dp_mon_tx_ppdu_info *tx_ppdu_info)
 {
	struct dp_mon_mpdu *tmp, *mon_mpdu;
-	struct sk_buff *head_msdu, *tail_msdu;

	list_for_each_entry_safe(mon_mpdu, tmp,
				 &tx_ppdu_info->dp_tx_mon_mpdu_list, list) {
		list_del(&mon_mpdu->list);
-		head_msdu = mon_mpdu->head;
-		tail_msdu = mon_mpdu->tail;

-		if (head_msdu)
-			ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, tail_msdu,
+		if (mon_mpdu->head)
+			ath12k_dp_mon_rx_deliver(ar, mon_mpdu,
						 &tx_ppdu_info->rx_status, napi);

		kfree(mon_mpdu);
@@ -1948,7 +3362,6 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
 enum hal_rx_mon_status
 ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
				  struct ath12k_mon_data *pmon,
-				  int mac_id,
				  struct sk_buff *skb,
				  struct napi_struct *napi,
				  u32 ppdu_id)
@@ -1995,155 +3408,43 @@ ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
			break;
		}
	} while (tlv_status != DP_MON_TX_FES_STATUS_END);

-	ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_data_ppdu_info);
-	ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_prot_ppdu_info);
+	ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_data_ppdu_info);
+	ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_prot_ppdu_info);

	return tlv_status;
 }

-int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
-			       enum dp_monitor_mode monitor_mode,
-			       struct napi_struct *napi)
-{
-	struct hal_mon_dest_desc *mon_dst_desc;
-	struct ath12k_pdev_dp *pdev_dp = &ar->dp;
-	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
-	struct ath12k_base *ab = ar->ab;
-	struct ath12k_dp *dp = &ab->dp;
-	struct sk_buff *skb;
-	struct ath12k_skb_rxcb *rxcb;
-	struct dp_srng *mon_dst_ring;
-	struct hal_srng *srng;
-	struct dp_rxdma_mon_ring *buf_ring;
-	u64 cookie;
-	u32 ppdu_id;
-	int num_buffs_reaped = 0, srng_id, buf_id;
-	u8 dest_idx = 0, i;
-	bool end_of_ppdu;
-	struct hal_rx_mon_ppdu_info *ppdu_info;
-	struct ath12k_peer *peer = NULL;
-
-	ppdu_info = &pmon->mon_ppdu_info;
-	memset(ppdu_info, 0, sizeof(*ppdu_info));
-	ppdu_info->peer_id = HAL_INVALID_PEERID;
-
-	srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
-
-	if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) {
-		mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
-		buf_ring = &dp->rxdma_mon_buf_ring;
-	} else {
-		return 0;
-	}
-
-	srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
-
-	spin_lock_bh(&srng->lock);
-	ath12k_hal_srng_access_begin(ab, srng);
-
-	while (likely(*budget)) {
-		*budget -= 1;
-		mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
-		if (unlikely(!mon_dst_desc))
-			break;
-
-		cookie = le32_to_cpu(mon_dst_desc->cookie);
-		buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
-
-		spin_lock_bh(&buf_ring->idr_lock);
-		skb = idr_remove(&buf_ring->bufs_idr, buf_id);
-		spin_unlock_bh(&buf_ring->idr_lock);
-
-		if (unlikely(!skb)) {
-			ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
-				    buf_id);
-			goto move_next;
-		}
-
-		rxcb = ATH12K_SKB_RXCB(skb);
-		dma_unmap_single(ab->dev, rxcb->paddr,
-				 skb->len + skb_tailroom(skb),
-				 DMA_FROM_DEVICE);
-
-		pmon->dest_skb_q[dest_idx] = skb;
-		dest_idx++;
-		ppdu_id = le32_to_cpu(mon_dst_desc->ppdu_id);
-		end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
-					    HAL_MON_DEST_INFO0_END_OF_PPDU);
-		if (!end_of_ppdu)
-			continue;
-
-		for (i = 0; i < dest_idx; i++) {
-			skb = pmon->dest_skb_q[i];
-
-			if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
-				ath12k_dp_mon_rx_parse_mon_status(ar, pmon, mac_id,
-								  skb, napi);
-			else
-				ath12k_dp_mon_tx_parse_mon_status(ar, pmon, mac_id,
-								  skb, napi, ppdu_id);
-
-			peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
-
-			if (!peer || !peer->sta) {
-				ath12k_dbg(ab, ATH12K_DBG_DATA,
-					   "failed to find the peer with peer_id %d\n",
-					   ppdu_info->peer_id);
-				dev_kfree_skb_any(skb);
-				continue;
-			}
-
-			dev_kfree_skb_any(skb);
-			pmon->dest_skb_q[i] = NULL;
-		}
-
-		dest_idx = 0;
-move_next:
-		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
-		ath12k_hal_srng_src_get_next_entry(ab, srng);
-		num_buffs_reaped++;
-	}
-
-	ath12k_hal_srng_access_end(ab, srng);
-	spin_unlock_bh(&srng->lock);
-
-	return num_buffs_reaped;
-}
-
 static void
 ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats,
					      struct hal_rx_mon_ppdu_info *ppdu_info,
					      struct hal_rx_user_status *user_stats,
					      u32 num_msdu)
 {
-	u32 rate_idx = 0;
+	struct ath12k_rx_peer_rate_stats *stats;
	u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs;
	u32 nss_idx = (user_stats) ? user_stats->nss - 1 : ppdu_info->nss - 1;
	u32 bw_idx = ppdu_info->bw;
	u32 gi_idx = ppdu_info->gi;
+	u32 len;

-	if ((mcs_idx > HAL_RX_MAX_MCS_HE) || (nss_idx >= HAL_RX_MAX_NSS) ||
-	    (bw_idx >= HAL_RX_BW_MAX) || (gi_idx >= HAL_RX_GI_MAX)) {
+	if (mcs_idx > HAL_RX_MAX_MCS_HT || nss_idx >= HAL_RX_MAX_NSS ||
+	    bw_idx >= HAL_RX_BW_MAX || gi_idx >= HAL_RX_GI_MAX) {
		return;
	}

-	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N ||
-	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC) {
-		rate_idx = mcs_idx * 8 + 8 * 10 * nss_idx;
-		rate_idx += bw_idx * 2 + gi_idx;
-	} else if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX) {
+	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX ||
+	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE)
		gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi);
-		rate_idx = mcs_idx * 12 + 12 * 12 * nss_idx;
-		rate_idx += bw_idx * 3 + gi_idx;
-	} else {
-		return;
-	}

-	rx_stats->pkt_stats.rx_rate[rate_idx] += num_msdu;
+	rx_stats->pkt_stats.rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += num_msdu;
+
+	stats = &rx_stats->byte_stats;
+
	if (user_stats)
-		rx_stats->byte_stats.rx_rate[rate_idx] += user_stats->mpdu_ok_byte_count;
+		len = user_stats->mpdu_ok_byte_count;
	else
-		rx_stats->byte_stats.rx_rate[rate_idx] += ppdu_info->mpdu_len;
+		len = ppdu_info->mpdu_len;
+
+	stats->rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += len;
 }

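The rate-table change above replaces hand-computed flat indices, which differed per preamble type, with a single multi-dimensional array keyed by bandwidth, guard interval, NSS and MCS. A minimal sketch of that layout; the dimension sizes are hypothetical, the driver derives them from its HAL_RX_* limits:

#include <stdint.h>

/* hypothetical dimensions for illustration */
#define BW_MAX	4
#define GI_MAX	4
#define NSS_MAX	8
#define MCS_MAX	16

struct rate_stats {
	uint64_t rx_rate[BW_MAX][GI_MAX][NSS_MAX][MCS_MAX];
};

/* One bounds check guards every dimension, then a single indexed
 * increment replaces the old per-preamble index arithmetic. */
static void rate_stats_add(struct rate_stats *s, unsigned int bw,
			   unsigned int gi, unsigned int nss,
			   unsigned int mcs, uint64_t bytes)
{
	if (bw >= BW_MAX || gi >= GI_MAX || nss >= NSS_MAX || mcs >= MCS_MAX)
		return;
	s->rx_rate[bw][gi][nss][mcs] += bytes;
}

The design win is that an out-of-range value in any one dimension can no longer silently alias another rate bucket, which the flat-index scheme allowed.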
 static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
@@ -2153,11 +3454,11 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
	struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats;
	u32 num_msdu;

+	arsta->rssi_comb = ppdu_info->rssi_comb;
+	ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+
	if (!rx_stats)
		return;

-	arsta->rssi_comb = ppdu_info->rssi_comb;
-
	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;

@@ -2229,6 +3530,12 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
		rx_stats->byte_stats.he_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
	}

+	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE &&
+	    ppdu_info->mcs <= HAL_RX_MAX_MCS_BE) {
+		rx_stats->pkt_stats.be_mcs_count[ppdu_info->mcs] += num_msdu;
+		rx_stats->byte_stats.be_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
+	}
+
	if ((ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
	     ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) &&
	    ppdu_info->rate < HAL_RX_LEGACY_RATE_INVALID) {
@@ -2323,13 +3630,12 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,

	ahsta = ath12k_sta_to_ahsta(peer->sta);
	arsta = &ahsta->deflink;
+	arsta->rssi_comb = ppdu_info->rssi_comb;
+	ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
	rx_stats = arsta->rx_stats;
-
	if (!rx_stats)
		return;

-	arsta->rssi_comb = ppdu_info->rssi_comb;
-
	num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count +
		   user_stats->udp_msdu_count + user_stats->other_msdu_count;

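The hunks above also start feeding each combined RSSI sample into ewma_avg_rssi_add(), the kernel's DECLARE_EWMA-generated moving average. A user-space sketch of a comparable fixed-point EWMA; the precision and weight values here are illustrative, not necessarily the driver's:

#include <stdint.h>
#include <stdio.h>

/* Fixed-point EWMA: values scaled by 2^10, weight 2^3, so
 * avg' = avg - avg/8 + sample/8, in the spirit of the kernel's
 * DECLARE_EWMA() machinery. */
struct ewma { uint64_t internal; };

static void ewma_add(struct ewma *e, uint64_t sample)
{
	uint64_t scaled = sample << 10;

	e->internal = e->internal ?
		(e->internal - (e->internal >> 3)) + (scaled >> 3) : scaled;
}

int main(void)
{
	struct ewma rssi = { 0 };
	int samples[] = { 40, 42, 38, 45 };	/* e.g. RSSI magnitudes */

	for (int i = 0; i < 4; i++)
		ewma_add(&rssi, samples[i]);
	printf("avg ~ %llu\n", (unsigned long long)(rssi.internal >> 10));
	return 0;
}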
@@ -2415,8 +3721,15 @@ ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k *ar,
		ath12k_dp_mon_rx_update_user_stats(ar, ppdu_info, i);
 }

-int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
-				   struct napi_struct *napi, int *budget)
+static void
+ath12k_dp_mon_rx_memset_ppdu_info(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	memset(ppdu_info, 0, sizeof(*ppdu_info));
+	ppdu_info->peer_id = HAL_INVALID_PEERID;
+}
+
+int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget,
+			       struct napi_struct *napi)
 {
	struct ath12k_base *ab = ar->ab;
	struct ath12k_pdev_dp *pdev_dp = &ar->dp;
@@ -2432,13 +3745,14 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
	struct ath12k_sta *ahsta = NULL;
	struct ath12k_link_sta *arsta;
	struct ath12k_peer *peer;
+	struct sk_buff_head skb_list;
	u64 cookie;
	int num_buffs_reaped = 0, srng_id, buf_id;
-	u8 dest_idx = 0, i;
-	bool end_of_ppdu;
-	u32 hal_status;
+	u32 hal_status, end_offset, info0, end_reason;
+	u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, ar->pdev_idx);

-	srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+	__skb_queue_head_init(&skb_list);
+	srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, pdev_idx);
	mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
	buf_ring = &dp->rxdma_mon_buf_ring;

@@ -2451,6 +3765,15 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
		mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
		if (unlikely(!mon_dst_desc))
			break;
+
+		/* In case of an empty descriptor, the cookie in the ring
+		 * descriptor is invalid. Therefore, this entry is skipped, and
+		 * ring processing continues.
+		 */
+		info0 = le32_to_cpu(mon_dst_desc->info0);
+		if (u32_get_bits(info0, HAL_MON_DEST_INFO0_EMPTY_DESC))
+			goto move_next;
+
		cookie = le32_to_cpu(mon_dst_desc->cookie);
		buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);

@@ -2468,63 +3791,583 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
		dma_unmap_single(ab->dev, rxcb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
-		pmon->dest_skb_q[dest_idx] = skb;
-		dest_idx++;
-		end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
-					    HAL_MON_DEST_INFO0_END_OF_PPDU);
-		if (!end_of_ppdu)
+
+		end_reason = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_REASON);
+
+		/* HAL_MON_FLUSH_DETECTED implies that an rx flush was received
+		 * at the end of the rx PPDU, and HAL_MON_PPDU_TRUNCATED implies
+		 * that the PPDU was truncated due to a system-level error. In
+		 * both cases the buffer data can be discarded.
+		 */
+		if ((end_reason == HAL_MON_FLUSH_DETECTED) ||
+		    (end_reason == HAL_MON_PPDU_TRUNCATED)) {
+			ath12k_dbg(ab, ATH12K_DBG_DATA,
+				   "Monitor dest descriptor end reason %d", end_reason);
+			dev_kfree_skb_any(skb);
+			goto move_next;
+		}
+
+		/* Decrement the budget only when the ring descriptor carries
+		 * HAL_MON_END_OF_PPDU, to ensure that one PPDU worth of data is
+		 * always reaped. This helps to utilize the NAPI budget
+		 * efficiently.
+		 */
+		if (end_reason == HAL_MON_END_OF_PPDU) {
+			*budget -= 1;
+			rxcb->is_end_of_ppdu = true;
+		}
+
+		end_offset = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_OFFSET);
+		if (likely(end_offset <= DP_RX_BUFFER_SIZE)) {
+			skb_put(skb, end_offset);
+		} else {
+			ath12k_warn(ab,
+				    "invalid offset on mon stats destination %u\n",
+				    end_offset);
+			skb_put(skb, DP_RX_BUFFER_SIZE);
+		}
+
+		__skb_queue_tail(&skb_list, skb);
+
+move_next:
+		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+		ath12k_hal_srng_dst_get_next_entry(ab, srng);
+		num_buffs_reaped++;
+	}
+
+	ath12k_hal_srng_access_end(ab, srng);
+	spin_unlock_bh(&srng->lock);
+
+	if (!num_buffs_reaped)
+		return 0;
+
+	/* In some cases, one PPDU worth of data can be spread across multiple
+	 * NAPI schedules. To avoid losing the already parsed ppdu_info, skip
+	 * the memset of the ppdu_info structure and continue processing it.
+	 */
+	if (!ppdu_info->ppdu_continuation)
+		ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
+
+	while ((skb = __skb_dequeue(&skb_list))) {
+		hal_status = ath12k_dp_mon_rx_parse_mon_status(ar, pmon, skb, napi);
+		if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+			ppdu_info->ppdu_continuation = true;
+			dev_kfree_skb_any(skb);
			continue;
+		}
+
+		if (ppdu_info->peer_id == HAL_INVALID_PEERID)
+			goto free_skb;
+
+		rcu_read_lock();
+		spin_lock_bh(&ab->base_lock);
+		peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+		if (!peer || !peer->sta) {
+			ath12k_dbg(ab, ATH12K_DBG_DATA,
+				   "failed to find the peer with monitor peer_id %d\n",
+				   ppdu_info->peer_id);
+			goto next_skb;
+		}
+
+		if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
+			ahsta = ath12k_sta_to_ahsta(peer->sta);
+			arsta = &ahsta->deflink;
+			ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
+							      ppdu_info);
+		} else if ((ppdu_info->fc_valid) &&
+			   (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
+			ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
+			ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
+		}
+
+next_skb:
+		spin_unlock_bh(&ab->base_lock);
+		rcu_read_unlock();
+free_skb:
+		dev_kfree_skb_any(skb);
+		ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
+	}

-		for (i = 0; i < dest_idx; i++) {
-			skb = pmon->dest_skb_q[i];
-			hal_status = ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+	return num_buffs_reaped;
+}
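The reap loop above charges the NAPI budget only on descriptors that close a PPDU, so a PPDU is never split across budget boundaries. A standalone simulation of that accounting rule (names and layout are illustrative, not driver code):

#include <stdio.h>

struct desc { int end_of_ppdu; };

/* Reap until the budget is spent, charging one unit only when a
 * descriptor closes a PPDU, so PPDUs are never cut short mid-way. */
static int reap(const struct desc *ring, int n, int budget)
{
	int reaped = 0;

	for (int i = 0; i < n && budget; i++) {
		if (ring[i].end_of_ppdu)
			budget--;	/* one unit per complete PPDU */
		reaped++;
	}
	return reaped;
}

int main(void)
{
	struct desc ring[] = { {0}, {0}, {1}, {0}, {1} };

	/* budget 1 still reaps all three buffers of the first PPDU */
	printf("%d buffers reaped\n", reap(ring, 5, 1));
	return 0;
}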
+static int ath12k_dp_rx_reap_mon_status_ring(struct ath12k_base *ab, int mac_id,
+					     int *budget, struct sk_buff_head *skb_list)
+{
+	const struct ath12k_hw_hal_params *hal_params;
+	int buf_id, srng_id, num_buffs_reaped = 0;
+	enum dp_mon_status_buf_state reap_status;
+	struct dp_rxdma_mon_ring *rx_ring;
+	struct ath12k_mon_data *pmon;
+	struct ath12k_skb_rxcb *rxcb;
+	struct hal_tlv_64_hdr *tlv;
+	void *rx_mon_status_desc;
+	struct hal_srng *srng;
+	struct ath12k_dp *dp;
+	struct sk_buff *skb;
+	struct ath12k *ar;
+	dma_addr_t paddr;
+	u32 cookie;
+	u8 rbm;
+
+	ar = ab->pdevs[ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id)].ar;
+	dp = &ab->dp;
+	pmon = &ar->dp.mon_data;
+	srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
+
+	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+	spin_lock_bh(&srng->lock);
+
+	ath12k_hal_srng_access_begin(ab, srng);
+
+	while (*budget) {
+		*budget -= 1;
+		rx_mon_status_desc = ath12k_hal_srng_src_peek(ab, srng);
+		if (!rx_mon_status_desc) {
+			pmon->buf_state = DP_MON_STATUS_REPLINISH;
+			break;
+		}
+		ath12k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
+						&cookie, &rbm);
+		if (paddr) {
+			buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+			spin_lock_bh(&rx_ring->idr_lock);
+			skb = idr_find(&rx_ring->bufs_idr, buf_id);
+			spin_unlock_bh(&rx_ring->idr_lock);
+
+			if (!skb) {
+				ath12k_warn(ab, "rx monitor status with invalid buf_id %d\n",
+					    buf_id);
+				pmon->buf_state = DP_MON_STATUS_REPLINISH;
+				goto move_next;
+			}
+
+			rxcb = ATH12K_SKB_RXCB(skb);
+
+			dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+						skb->len + skb_tailroom(skb),
+						DMA_FROM_DEVICE);
+
+			tlv = (struct hal_tlv_64_hdr *)skb->data;
+			if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) !=
+			    HAL_RX_STATUS_BUFFER_DONE) {
+				pmon->buf_state = DP_MON_STATUS_NO_DMA;
+				ath12k_warn(ab,
+					    "mon status DONE not set %llx, buf_id %d\n",
+					    le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG),
+					    buf_id);
+				/* The RxDMA status-done bit might not be set
+				 * even though the TP has been moved by the HW.
+				 */
+
+				/* If the done status is missing:
+				 * 1. As per the MAC team's suggestion, when the
+				 *    HP + 1 entry is peeked and its DMA is not
+				 *    done but the HP + 2 entry's DMA done is
+				 *    set, skip the HP + 1 entry and start
+				 *    processing from the next interrupt.
+				 * 2. If the HP + 2 entry's DMA done is not set,
+				 *    poll on the HP + 1 entry until its DMA done
+				 *    is set, i.e. check the same buffer again on
+				 *    the next dp_rx_mon_status_srng_process run.
+				 */
+				reap_status = ath12k_dp_rx_mon_buf_done(ab, srng,
+									rx_ring);
+				if (reap_status == DP_MON_STATUS_NO_DMA)
+					continue;
+
+				spin_lock_bh(&rx_ring->idr_lock);
+				idr_remove(&rx_ring->bufs_idr, buf_id);
+				spin_unlock_bh(&rx_ring->idr_lock);
+
+				dma_unmap_single(ab->dev, rxcb->paddr,
+						 skb->len + skb_tailroom(skb),
+						 DMA_FROM_DEVICE);
-			if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
-			    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
				dev_kfree_skb_any(skb);
-				continue;
+				pmon->buf_state = DP_MON_STATUS_REPLINISH;
+				goto move_next;
			}

-			rcu_read_lock();
-			spin_lock_bh(&ab->base_lock);
-			peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
-			if (!peer || !peer->sta) {
-				ath12k_dbg(ab, ATH12K_DBG_DATA,
-					   "failed to find the peer with peer_id %d\n",
-					   ppdu_info->peer_id);
-				spin_unlock_bh(&ab->base_lock);
-				rcu_read_unlock();
+			spin_lock_bh(&rx_ring->idr_lock);
+			idr_remove(&rx_ring->bufs_idr, buf_id);
+			spin_unlock_bh(&rx_ring->idr_lock);
+
+			dma_unmap_single(ab->dev, rxcb->paddr,
+					 skb->len + skb_tailroom(skb),
+					 DMA_FROM_DEVICE);
+
+			if (ath12k_dp_pkt_set_pktlen(skb, RX_MON_STATUS_BUF_SIZE)) {
				dev_kfree_skb_any(skb);
+				goto move_next;
+			}
+			__skb_queue_tail(skb_list, skb);
+		} else {
+			pmon->buf_state = DP_MON_STATUS_REPLINISH;
+		}
+move_next:
+		skb = ath12k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+							&buf_id);
+
+		if (!skb) {
+			ath12k_warn(ab, "failed to alloc buffer for status ring\n");
+			hal_params = ab->hw_params->hal_params;
+			ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
+							hal_params->rx_buf_rbm);
+			num_buffs_reaped++;
+			break;
+		}
+		rxcb = ATH12K_SKB_RXCB(skb);
+
+		cookie = u32_encode_bits(mac_id, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
+			 u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+		ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
						cookie,
+						ab->hw_params->hal_params->rx_buf_rbm);
+		ath12k_hal_srng_src_get_next_entry(ab, srng);
+		num_buffs_reaped++;
+	}
+	ath12k_hal_srng_access_end(ab, srng);
+	spin_unlock_bh(&srng->lock);
+
+	return num_buffs_reaped;
+}
+
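The ath12k_dp_rx_mon_buf_done() call above implements the HP + 1 / HP + 2 rule from the comment: peek past the stalled entry and decide whether to skip it or keep polling it. A small sketch of that decision against a mocked ring entry; the types and names are hypothetical:

enum buf_state { BUF_DONE, BUF_NO_DMA, BUF_REPLENISH };

struct entry { int dma_done; };

/* Mimics the rule quoted above: if the entry after the stalled one is
 * already DMA-complete, skip the stalled entry (replenish it and move
 * on); otherwise keep polling the same entry (no-DMA). */
static enum buf_state probe_next(const struct entry *after_stalled)
{
	if (!after_stalled)
		return BUF_NO_DMA;
	return after_stalled->dma_done ? BUF_REPLENISH : BUF_NO_DMA;
}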
+static u32
+ath12k_dp_rx_mon_mpdu_pop(struct ath12k *ar, int mac_id,
+			  void *ring_entry, struct sk_buff **head_msdu,
+			  struct sk_buff **tail_msdu,
+			  struct list_head *used_list,
+			  u32 *npackets, u32 *ppdu_id)
+{
+	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+	struct ath12k_buffer_addr *p_buf_addr_info, *p_last_buf_addr_info;
+	u32 msdu_ppdu_id = 0, msdu_cnt = 0, total_len = 0, frag_len = 0;
+	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
+	bool is_frag, is_first_msdu, drop_mpdu = false;
+	struct hal_reo_entrance_ring *ent_desc =
+		(struct hal_reo_entrance_ring *)ring_entry;
+	u32 rx_bufs_used = 0, i = 0, desc_bank = 0;
+	struct hal_rx_desc *rx_desc, *tail_rx_desc;
+	struct hal_rx_msdu_link *msdu_link_desc;
+	struct sk_buff *msdu = NULL, *last = NULL;
+	struct ath12k_rx_desc_info *desc_info;
+	struct ath12k_buffer_addr buf_info;
+	struct hal_rx_msdu_list msdu_list;
+	struct ath12k_skb_rxcb *rxcb;
+	u16 num_msdus = 0;
+	dma_addr_t paddr;
+	u8 rbm;
+
+	ath12k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
+					    &sw_cookie,
+					    &p_last_buf_addr_info, &rbm,
+					    &msdu_cnt);
+
+	spin_lock_bh(&pmon->mon_lock);
+
+	if (le32_get_bits(ent_desc->info1,
+			  HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON) ==
+	    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+		u8 rxdma_err = le32_get_bits(ent_desc->info1,
+					     HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE);
+		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+			drop_mpdu = true;
+			pmon->rx_mon_stats.dest_mpdu_drop++;
+		}
+	}
+
+	is_frag = false;
+	is_first_msdu = true;
+	rx_pkt_offset = sizeof(struct hal_rx_desc);
+
+	do {
+		if (pmon->mon_last_linkdesc_paddr == paddr) {
+			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
+			spin_unlock_bh(&pmon->mon_lock);
+			return rx_bufs_used;
+		}
+
+		desc_bank = u32_get_bits(sw_cookie, DP_LINK_DESC_BANK_MASK);
+		msdu_link_desc =
+			ar->ab->dp.link_desc_banks[desc_bank].vaddr +
+			(paddr - ar->ab->dp.link_desc_banks[desc_bank].paddr);
+
+		ath12k_hal_rx_msdu_list_get(ar, msdu_link_desc, &msdu_list,
+					    &num_msdus);
+		desc_info = ath12k_dp_get_rx_desc(ar->ab,
+						  msdu_list.sw_cookie[num_msdus - 1]);
+		tail_rx_desc = (struct hal_rx_desc *)(desc_info->skb)->data;
+
+		for (i = 0; i < num_msdus; i++) {
+			u32 l2_hdr_offset;
+
+			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
+				ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+					   "i %d last_cookie %d is same\n",
+					   i, pmon->mon_last_buf_cookie);
+				drop_mpdu = true;
+				pmon->rx_mon_stats.dup_mon_buf_cnt++;
				continue;
			}

-		if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
-			ahsta = ath12k_sta_to_ahsta(peer->sta);
-			arsta = &ahsta->deflink;
-			ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
-							      ppdu_info);
-		} else if ((ppdu_info->fc_valid) &&
-			   (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
-			ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
-			ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
+			desc_info =
+				ath12k_dp_get_rx_desc(ar->ab, msdu_list.sw_cookie[i]);
+			msdu = desc_info->skb;
+
+			if (!msdu) {
+				ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+					   "msdu_pop: invalid msdu (%d/%d)\n",
+					   i + 1, num_msdus);
+				goto next_msdu;
+			}
+			rxcb = ATH12K_SKB_RXCB(msdu);
+			if (rxcb->paddr != msdu_list.paddr[i]) {
+				ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+					   "i %d paddr %lx != %lx\n",
+					   i, (unsigned long)rxcb->paddr,
+					   (unsigned long)msdu_list.paddr[i]);
+				drop_mpdu = true;
+				continue;
+			}
+			if (!rxcb->unmapped) {
+				dma_unmap_single(ar->ab->dev, rxcb->paddr,
+						 msdu->len +
+						 skb_tailroom(msdu),
+						 DMA_FROM_DEVICE);
+				rxcb->unmapped = 1;
+			}
+			if (drop_mpdu) {
+				ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+					   "i %d drop msdu %p *ppdu_id %x\n",
+					   i, msdu, *ppdu_id);
+				dev_kfree_skb_any(msdu);
+				msdu = NULL;
+				goto next_msdu;
			}

-		spin_unlock_bh(&ab->base_lock);
-		rcu_read_unlock();
-		dev_kfree_skb_any(skb);
-		memset(ppdu_info, 0, sizeof(*ppdu_info));
-		ppdu_info->peer_id = HAL_INVALID_PEERID;
+			rx_desc = (struct hal_rx_desc *)msdu->data;
+			l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, tail_rx_desc);
+			if (is_first_msdu) {
+				if (!ath12k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
+					drop_mpdu = true;
+					dev_kfree_skb_any(msdu);
+					msdu = NULL;
+					pmon->mon_last_linkdesc_paddr = paddr;
+					goto next_msdu;
+				}
+				msdu_ppdu_id =
+					ath12k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
+
+				if (ath12k_dp_mon_comp_ppduid(msdu_ppdu_id,
+							      ppdu_id)) {
+					spin_unlock_bh(&pmon->mon_lock);
+					return rx_bufs_used;
+				}
+				pmon->mon_last_linkdesc_paddr = paddr;
+				is_first_msdu = false;
+			}
+			ath12k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+						  &is_frag, &total_len,
+						  &frag_len, &msdu_cnt);
+			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+			if (ath12k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
+				dev_kfree_skb_any(msdu);
+				goto next_msdu;
+			}
+
+			if (!(*head_msdu))
+				*head_msdu = msdu;
+			else if (last)
+				last->next = msdu;
+
+			last = msdu;
+next_msdu:
+			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
+			rx_bufs_used++;
+			desc_info->skb = NULL;
+			list_add_tail(&desc_info->list, used_list);
		}

-		dest_idx = 0;
-move_next:
-		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
-		ath12k_hal_srng_src_get_next_entry(ab, srng);
-		num_buffs_reaped++;
+		ath12k_hal_rx_buf_addr_info_set(&buf_info, paddr, sw_cookie, rbm);
+
+		ath12k_dp_mon_next_link_desc_get(msdu_link_desc, &paddr,
+						 &sw_cookie, &rbm,
+						 &p_buf_addr_info);
+
+		ath12k_dp_rx_link_desc_return(ar->ab, &buf_info,
+					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+
+		p_last_buf_addr_info = p_buf_addr_info;
+
+	} while (paddr && msdu_cnt);
+
+	spin_unlock_bh(&pmon->mon_lock);
+
+	if (last)
+		last->next = NULL;
+
+	*tail_msdu = msdu;
+
+	if (msdu_cnt == 0)
+		*npackets = 1;
+
+	return rx_bufs_used;
+}
+
+/* Destination ring processing is considered stuck if the destination
+ * ring does not move while the status ring moves 16 PPDUs. As a
+ * workaround, the destination ring processing skips this destination
+ * ring PPDU.
+ */
+#define MON_DEST_RING_STUCK_MAX_CNT 16
+
+static void ath12k_dp_rx_mon_dest_process(struct ath12k *ar, int mac_id,
+					   u32 quota, struct napi_struct *napi)
+{
+	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+	struct ath12k_pdev_mon_stats *rx_mon_stats;
+	u32 ppdu_id, rx_bufs_used = 0, ring_id;
+	u32 mpdu_rx_bufs_used, npackets = 0;
+	struct ath12k_dp *dp = &ar->ab->dp;
+	struct ath12k_base *ab = ar->ab;
+	void *ring_entry, *mon_dst_srng;
+	struct dp_mon_mpdu *tmp_mpdu;
+	LIST_HEAD(rx_desc_used_list);
+	struct hal_srng *srng;
+
+	ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
+	srng = &ab->hal.srng_list[ring_id];
+
+	mon_dst_srng = &ab->hal.srng_list[ring_id];
+
+	spin_lock_bh(&srng->lock);
+
+	ath12k_hal_srng_access_begin(ab, mon_dst_srng);
+
+	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
+	rx_mon_stats = &pmon->rx_mon_stats;
+
+	while ((ring_entry = ath12k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+		struct sk_buff *head_msdu, *tail_msdu;
+
+		head_msdu = NULL;
+		tail_msdu = NULL;
+
+		mpdu_rx_bufs_used = ath12k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
+							      &head_msdu, &tail_msdu,
+							      &rx_desc_used_list,
+							      &npackets, &ppdu_id);
+
+		rx_bufs_used += mpdu_rx_bufs_used;
+
+		if (mpdu_rx_bufs_used) {
+			dp->mon_dest_ring_stuck_cnt = 0;
+		} else {
+			dp->mon_dest_ring_stuck_cnt++;
+			rx_mon_stats->dest_mon_not_reaped++;
+		}
+
+		if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
+			rx_mon_stats->dest_mon_stuck++;
+			ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+				   "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
+				   pmon->mon_ppdu_info.ppdu_id, ppdu_id,
+				   dp->mon_dest_ring_stuck_cnt,
+				   rx_mon_stats->dest_mon_not_reaped,
+				   rx_mon_stats->dest_mon_stuck);
+			spin_lock_bh(&pmon->mon_lock);
+			pmon->mon_ppdu_info.ppdu_id = ppdu_id;
+			spin_unlock_bh(&pmon->mon_lock);
+			continue;
+		}
+
+		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
+			spin_lock_bh(&pmon->mon_lock);
+			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+			spin_unlock_bh(&pmon->mon_lock);
+			ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+				   "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
+				   ppdu_id, pmon->mon_ppdu_info.ppdu_id,
+				   rx_mon_stats->dest_mon_not_reaped,
+				   rx_mon_stats->dest_mon_stuck);
+			break;
+		}
+
+		if (head_msdu && tail_msdu) {
+			tmp_mpdu = kzalloc(sizeof(*tmp_mpdu), GFP_ATOMIC);
+			if (!tmp_mpdu)
+				break;
+
+			tmp_mpdu->head = head_msdu;
+			tmp_mpdu->tail = tail_msdu;
+			tmp_mpdu->err_bitmap = pmon->err_bitmap;
+			tmp_mpdu->decap_format = pmon->decap_format;
+			ath12k_dp_mon_rx_deliver(ar, tmp_mpdu,
+						 &pmon->mon_ppdu_info, napi);
+			rx_mon_stats->dest_mpdu_done++;
+			kfree(tmp_mpdu);
+		}
+
+		ring_entry = ath12k_hal_srng_dst_get_next_entry(ar->ab,
+								mon_dst_srng);
	}
+	ath12k_hal_srng_access_end(ar->ab, mon_dst_srng);

-	ath12k_hal_srng_access_end(ab, srng);
	spin_unlock_bh(&srng->lock);
+
+	if (rx_bufs_used) {
+		rx_mon_stats->dest_ppdu_done++;
+		ath12k_dp_rx_bufs_replenish(ar->ab,
+					    &dp->rx_refill_buf_ring,
+					    &rx_desc_used_list,
+					    rx_bufs_used);
+	}
+}
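The stuck-ring workaround above is a simple hysteresis counter: it rises on every empty reap and resets as soon as progress is made. A standalone model of that rule:

#include <stdio.h>

#define STUCK_MAX 16

/* Returns 1 when the destination ring should be skipped: the counter
 * rises on every empty reap and resets as soon as buffers move. */
static int dest_ring_stuck(int *cnt, int bufs_reaped)
{
	if (bufs_reaped) {
		*cnt = 0;
		return 0;
	}
	return ++(*cnt) > STUCK_MAX;
}

int main(void)
{
	int cnt = 0, skip = 0;

	for (int i = 0; i < 18 && !skip; i++)
		skip = dest_ring_stuck(&cnt, 0);
	printf("skipped after %d empty reaps\n", cnt);
	return 0;
}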
+static int
+__ath12k_dp_mon_process_ring(struct ath12k *ar, int mac_id,
+			     struct napi_struct *napi, int *budget)
+{
+	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+	struct ath12k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+	enum hal_rx_mon_status hal_status;
+	struct sk_buff_head skb_list;
+	int num_buffs_reaped;
+	struct sk_buff *skb;
+
+	__skb_queue_head_init(&skb_list);
+
+	num_buffs_reaped = ath12k_dp_rx_reap_mon_status_ring(ar->ab, mac_id,
+							     budget, &skb_list);
+	if (!num_buffs_reaped)
+		goto exit;
+
+	while ((skb = __skb_dequeue(&skb_list))) {
+		memset(ppdu_info, 0, sizeof(*ppdu_info));
+		ppdu_info->peer_id = HAL_INVALID_PEERID;
+
+		hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+
+		if (ar->monitor_started &&
+		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
+			rx_mon_stats->status_ppdu_done++;
+			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
+			ath12k_dp_rx_mon_dest_process(ar, mac_id, *budget, napi);
+			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+		}
+
+		dev_kfree_skb_any(skb);
+	}
+
+exit:
+	return num_buffs_reaped;
+}
+
@@ -2535,11 +4378,14 @@ int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
	struct ath12k *ar = ath12k_ab_to_ar(ab, mac_id);
	int num_buffs_reaped = 0;

-	if (!ar->monitor_started)
-		ath12k_dp_mon_rx_process_stats(ar, mac_id, napi, &budget);
-	else
-		num_buffs_reaped = ath12k_dp_mon_srng_process(ar, mac_id, &budget,
-							      monitor_mode, napi);
+	if (ab->hw_params->rxdma1_enable) {
+		if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
+			num_buffs_reaped = ath12k_dp_mon_srng_process(ar, &budget, napi);
+	} else {
+		if (ar->monitor_started)
+			num_buffs_reaped =
+				__ath12k_dp_mon_process_ring(ar, mac_id, napi, &budget);
+	}

	return num_buffs_reaped;
 }
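ath12k_dp_mon_process_ring() is the entry point a NAPI poll handler would drive with its budget. The sketch below shows hypothetical wiring only: the embedded napi member, the mac_id of 0 and the completion handling are assumptions for illustration, not the driver's actual ext-IRQ grouping:

/* Hypothetical NAPI poll wiring: reap up to `budget` entries, then
 * stop polling early when the ring ran dry. */
static int mon_napi_poll(struct napi_struct *napi, int budget)
{
	/* assumes a hypothetical napi member embedded in ath12k_base */
	struct ath12k_base *ab = container_of(napi, struct ath12k_base, napi);
	int work_done;

	work_done = ath12k_dp_mon_process_ring(ab, 0, napi, budget,
					       ATH12K_DP_RX_MONITOR_MODE);
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return min(work_done, budget);
}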